SGTY committed 2 weeks ago
parent commit 754ab6b48a

+ 5 - 1
src/knowledge/router/graph_api.py

@@ -12,7 +12,7 @@ class KgQuery(BaseModel):
     label_name: str
     input_str: str
 
-@router.post("/getGraph")
+@router.post("/knowledge/getGraph")
 async def get_graph(kg_query: KgQuery) -> StandardResponse:
     db = next(get_db())
     kg_graph_service = KgGraphService(db)
@@ -22,4 +22,8 @@ async def get_graph(kg_query: KgQuery) -> StandardResponse:
         data=graph_data
     )
 
+
+
+
+
 graph_router = router
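
For a quick check of the renamed route, a minimal client sketch (the base URL, any router prefix, and the payload values are assumptions; KgQuery only requires label_name and input_str):

    import requests

    # Hypothetical local call against the new path; adjust host/port to your deployment.
    resp = requests.post(
        "http://localhost:8000/knowledge/getGraph",
        json={"label_name": "疾病", "input_str": "糖尿病"},
    )
    print(resp.json())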

+ 197 - 0
src/knowledge/router/knowledge_saas.py

@@ -0,0 +1,197 @@
+from fastapi import APIRouter, Depends, HTTPException
+from typing import Optional, List
+from pydantic import BaseModel
+from ..model.response import StandardResponse
+from ..db.session import get_db
+from sqlalchemy.orm import Session
+
+from ..service.kg_node_service import KGNodeService
+from ..service.trunks_service import TrunksService
+import logging
+
+
+router = APIRouter(tags=["SaaS Knowledge Base"])
+
+logger = logging.getLogger(__name__)
+
+class PaginatedSearchRequest(BaseModel):
+    keyword: Optional[str] = None
+    category: Optional[str] = None
+    distance: Optional[float] = None
+    pageNo: int = 1
+    limit: int = 10
+    knowledge_ids: Optional[List[str]] = None
+
+class NodePaginatedSearchRequest(BaseModel):
+    name: str
+    category: Optional[str] = None
+    pageNo: int = 1
+    limit: int = 10  
+
+class NodeCreateRequest(BaseModel):
+    name: str
+    category: str
+    layout: Optional[str] = None
+    version: Optional[str] = None
+    embedding: Optional[List[float]] = None
+
+class NodeUpdateRequest(BaseModel):
+    layout: Optional[str] = None
+    version: Optional[str] = None
+    status: Optional[int] = None
+    embedding: Optional[List[float]] = None
+
+class VectorSearchRequest(BaseModel):
+    text: str
+    limit: int = 10
+    type: Optional[str] = None
+
+class NodeRelationshipRequest(BaseModel):
+    src_id: int
+
+@router.post("/kgrt_api/saas/nodes/paginated_search", response_model=StandardResponse)
+@router.post("/knowledge/saas/nodes/paginated_search", response_model=StandardResponse)
+async def paginated_search(
+    payload: PaginatedSearchRequest,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = KGNodeService(db)
+        search_params = {
+            'keyword': payload.keyword,
+            'category': payload.category,
+            'pageNo': payload.pageNo,
+            'limit': payload.limit,
+            'knowledge_ids': payload.knowledge_ids,
+            'distance': payload.distance,
+            'load_props': True
+        }
+        result = service.paginated_search(search_params)
+        
+        # Define the display order for prop_title values
+        prop_title_order = [
+            '基础信息', '概述', '病因学', '流行病学', '发病机制', '病理学',
+            '临床表现', '辅助检查', '诊断', '鉴别诊断', '并发症', '治疗', '护理', '预后', '预防'
+        ]
+        
+        # Filter and sort each record's props
+        for record in result['records']:
+            if 'props' in record:
+                # Keep only props whose prop_title is in the list above
+                filtered_props = [prop for prop in record['props'] if prop.get('prop_title') in prop_title_order]
+                
+                # Sort them in the specified order
+                sorted_props = sorted(
+                    filtered_props,
+                    key=lambda x: prop_title_order.index(x.get('prop_title')) if x.get('prop_title') in prop_title_order else len(prop_title_order)
+                )
+                
+                # Write the filtered, sorted props back onto the record
+                record['props'] = sorted_props
+        
+        return StandardResponse(
+            success=True,
+            data={
+                'records': result['records'],
+                'pagination': result['pagination']
+            }
+        )
+    except Exception as e:
+        logger.error(f"分页查询失败: {str(e)}")
+        raise HTTPException(
+            status_code=500,
+            detail=StandardResponse(
+                success=False,
+                error_code=500,
+                error_msg=str(e)
+            )
+        )
+
+@router.post("/nodes", response_model=StandardResponse)
+async def create_node(
+    payload: NodeCreateRequest,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.create_node(payload.dict())
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"创建节点失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+@router.get("/nodes/{node_id}", response_model=StandardResponse)
+async def get_node(
+    node_id: int,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.get_node(node_id)
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"获取节点失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+@router.put("/nodes/{node_id}", response_model=StandardResponse)
+async def update_node(
+    node_id: int,
+    payload: NodeUpdateRequest,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.update_node(node_id, payload.dict(exclude_unset=True))
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"更新节点失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+@router.post('/kgrt_api/saas/trunks/vector_search', response_model=StandardResponse)
+@router.post('/knowledge/saas/trunks/vector_search', response_model=StandardResponse)
+async def vector_search(
+    payload: VectorSearchRequest,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.search_by_vector(
+            payload.text,
+            payload.limit,
+            type=payload.type
+        )
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"向量搜索失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+@router.get('/kgrt_api/saas/trunks/{trunk_id}', response_model=StandardResponse)
+@router.get('/knowledge/saas/trunks/{trunk_id}', response_model=StandardResponse)
+async def get_trunk(
+    trunk_id: int,
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.get_trunk_by_id(trunk_id)
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"获取trunk详情失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+@router.post('/kgrt_api/saas/trunks/{trunk_id}/highlight', response_model=StandardResponse)
+@router.post('/knowledge/saas/trunks/{trunk_id}/highlight', response_model=StandardResponse)
+async def highlight(
+    trunk_id: int,
+    targetSentences: List[str],
+    db: Session = Depends(get_db)
+):
+    try:
+        service = TrunksService()
+        result = service.highlight(trunk_id, targetSentences)
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"获取trunk高亮信息失败: {str(e)}")
+        raise HTTPException(500, detail=StandardResponse.error(str(e)))
+
+saas_kb_router = router
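
As a sanity check for the new SaaS routes, a minimal sketch of calling the paginated node search (base URL and payload values are assumptions; the field names come from PaginatedSearchRequest, and both the /kgrt_api and /knowledge prefixes map to the same handler):

    import requests

    resp = requests.post(
        "http://localhost:8000/knowledge/saas/nodes/paginated_search",
        json={"keyword": "糖尿病", "category": "疾病", "pageNo": 1, "limit": 10},
    )
    body = resp.json()
    # Assuming StandardResponse serializes as {"success": ..., "data": {...}}
    print(body["data"]["pagination"])
    for record in body["data"]["records"]:
        print(record.get("name"), [p.get("prop_title") for p in record.get("props", [])])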

+ 756 - 94
src/knowledge/router/text_search.py

@@ -4,15 +4,19 @@ from typing import List, Optional
 from ..service.trunks_service import TrunksService
 from ..utils.sentence_util import SentenceUtil
 
+from ..utils.vector_distance import VectorDistance
 from ..model.response import StandardResponse
-
+from ..utils.vectorizer import Vectorizer
+# from utils.find_text_in_pdf import find_text_in_pdf
 import os
-
+DISTANCE_THRESHOLD = 0.73
 import logging
-
+import time
 from ..db.session import get_db
 from sqlalchemy.orm import Session
-
+from ..service.kg_node_service import KGNodeService
+from ..service.kg_prop_service import KGPropService
+from ..service.kg_edge_service import KGEdgeService
 
 from cachetools import TTLCache
 
@@ -21,86 +25,351 @@ from ..utils.text_similarity import TextSimilarityFinder
 
 logger = logging.getLogger(__name__)
 router = APIRouter(tags=["Text Search"])
-DISTANCE_THRESHOLD = 0.73
-# Create a global cache instance
-#cache = TTLCache(maxsize=1000, ttl=3600)
-
-class FindSimilarTexts(BaseModel):
-    keywords:Optional[List[str]] = None
-    search_text: str
-
-@router.post("/knowledge/text/find_similar_texts", response_model=StandardResponse)
-async def find_similar_texts(request: FindSimilarTexts, db: Session = Depends(get_db)):
-    trunks_service = TrunksService()
-    search_text = request.search_text
-    if request.keywords:
-        search_text = f"{request.keywords}:{search_text}"
-    # Fetch similar content via vector search
-    search_results = trunks_service.search_by_vector(
-        text=search_text,
-        limit=500,
-        type='trunk',
-        distance=0.7
-    )
-
-    # Prepare corpus data
-    trunk_texts = []
-    trunk_ids = []
 
-    # Dict caching each trunk's details
-    trunk_details = {}
-    if len(search_results) == 0:
-        return StandardResponse(success=True)
-    for trunk in search_results:
-        trunk_texts.append(trunk.get('content'))
-        trunk_ids.append(trunk.get('id'))
-        # Cache the trunk's details
-        trunk_details[trunk.get('id')] = {
-            'id': trunk.get('id'),
-            'content': trunk.get('content'),
-            'file_path': trunk.get('file_path'),
-            'title': trunk.get('title'),
-            'referrence': trunk.get('referrence'),
-            'page_no': trunk.get('page_no')
+# Create a global cache instance
+cache = TTLCache(maxsize=1000, ttl=3600)
+
+class TextSearchRequest(BaseModel):
+    text: str
+    conversation_id: Optional[str] = None
+    need_convert: Optional[bool] = False
+
+class TextCompareRequest(BaseModel):
+    sentence: str
+    text: str
+
+class TextMatchRequest(BaseModel):
+    text: str = Field(..., min_length=1, max_length=10000, description="Text content to search")
+
+    @validator('text')
+    def validate_text(cls, v):
+        # Keep all printable characters, newlines, and CJK characters
+        v = ''.join(char for char in v if char.isprintable() or char in '\n\r')
+        
+        # Escape JSON special characters
+        # Handle backslashes first so later escapes are not double-escaped
+        v = v.replace('\\', '\\\\')
+        # Escape quotes and other special characters
+        v = v.replace('"', '\\"')
+        v = v.replace('/', '\\/')
+        # Escape control characters
+        v = v.replace('\n', '\\n')
+        v = v.replace('\r', '\\r')
+        v = v.replace('\t', '\\t')
+        v = v.replace('\b', '\\b')
+        v = v.replace('\f', '\\f')
+        # Unicode escaping (intentionally left disabled)
+        # v = v.replace('\u', '\\u')
+        
+        return v
+
+class TextCompareMultiRequest(BaseModel):
+    origin: str
+    similar: str
+
+class NodePropsSearchRequest(BaseModel):
+    node_id: int
+    props_ids: List[int]
+    symptoms: Optional[List[str]] = None
+
+@router.post("/kgrt_api/text/clear_cache", response_model=StandardResponse)
+async def clear_cache():
+    try:
+        # Clear the global cache
+        cache.clear()
+        return StandardResponse(success=True, data={"message": "Cache cleared"})
+    except Exception as e:
+        logger.error(f"清除缓存失败: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/kgrt_api/text/search", response_model=StandardResponse)
+@router.post("/knowledge/text/search", response_model=StandardResponse)
+async def search_text(request: TextSearchRequest):
+    try:
+        # If request.text looks like JSON, convert it to plain text with JsonToTextConverter
+        if request.text.startswith('{') and request.text.endswith('}'):
+            from ..utils.json_to_text import JsonToTextConverter
+            converter = JsonToTextConverter()
+            request.text = converter.convert(request.text)
+
+        # Split the text into sentences with SentenceUtil
+        sentences = SentenceUtil.split_text(request.text)
+        if not sentences:
+            return StandardResponse(success=True, data={"answer": "", "references": []})
+        
+        # Initialize the service and result lists
+        trunks_service = TrunksService()
+        result_sentences = []
+        all_references = []
+        reference_index = 1
+        
+        # Fetch cached results for this conversation_id, if any
+        cached_results = trunks_service.get_cached_result(request.conversation_id) if request.conversation_id else []
+        
+        for sentence in sentences:
+            # if request.need_convert:
+            sentence = sentence.replace("\n", "<br>")
+            if len(sentence) < 10:
+                result_sentences.append(sentence)
+                continue
+            if cached_results:
+                # With cached results, compute vector distances locally
+                min_distance = float('inf')
+                best_result = None
+                sentence_vector = Vectorizer.get_embedding(sentence)
+                
+                for cached_result in cached_results:
+                    content_vector = cached_result['embedding']
+                    distance = VectorDistance.calculate_distance(sentence_vector, content_vector)
+                    if distance < min_distance:
+                        min_distance = distance
+                        best_result = {**cached_result, 'distance': distance}
+                        
+                
+                if best_result and best_result['distance'] < DISTANCE_THRESHOLD:
+                    search_results = [best_result]
+                else:
+                    search_results = []
+            else:
+                # No cached results: run a vector search
+                search_results = trunks_service.search_by_vector(
+                    text=sentence,
+                    limit=1,
+                    type='trunk'
+                )
+
+            # If nothing was found, keep the sentence without a citation
+            if not search_results:
+                result_sentences.append(sentence)
+                continue
+
+            # Process search results
+            for search_result in search_results:
+                distance = search_result.get("distance", DISTANCE_THRESHOLD)
+                if distance >= DISTANCE_THRESHOLD:
+                    result_sentences.append(sentence)
+                    continue
+                
+                # Check whether this reference already exists
+                existing_ref = next((ref for ref in all_references if ref["id"] == search_result["id"]), None)
+                current_index = reference_index
+                if existing_ref:
+                    current_index = int(existing_ref["index"])
+                else:
+                    # Add to the reference list
+                    # Extract the file name from referrence
+                    file_name = ""
+                    referrence = search_result.get("referrence", "")
+                    if referrence and "/books/" in referrence:
+                        file_name = referrence.split("/books/")[-1]
+                        # Strip the file extension
+                        file_name = os.path.splitext(file_name)[0]
+
+                    reference = {
+                        "index": str(reference_index),
+                        "id": search_result["id"],
+                        "content": search_result["content"],
+                        "file_path": search_result.get("file_path", ""),
+                        "title": search_result.get("title", ""),
+                        "distance": distance,
+                        "file_name": file_name,
+                        "referrence": referrence
+                    }
+                    
+                    
+                    all_references.append(reference)
+                    reference_index += 1
+                
+                # Append the citation marker
+                if sentence.endswith('<br>'):
+                    # With trailing <br> tags, insert ^[current_index]^ before each <br>
+                    result_sentence = sentence.replace('<br>', f'^[{current_index}]^<br>')
+                else:
+                    # Otherwise append ^[current_index]^ at the end of the sentence
+                    result_sentence = f'{sentence}^[{current_index}]^'
+                
+                result_sentences.append(result_sentence)
+     
+        # Assemble the response payload
+        response_data = {
+            "answer": result_sentences,
+            "references": all_references
         }
+        
+        return StandardResponse(success=True, data=response_data)
+        
+    except Exception as e:
+        logger.error(f"Text search failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/kgrt_api/text/match", response_model=StandardResponse)
+@router.post("/knowledge/text/match", response_model=StandardResponse)
+async def match_text(request: TextCompareRequest):
+    try:
+        sentences = SentenceUtil.split_text(request.text)
+        sentence_vector = Vectorizer.get_embedding(request.sentence)
+        min_distance = float('inf')
+        best_sentence = ""
+        result_sentences = []
+        for temp in sentences:
+            result_sentences.append(temp)
+            if len(temp) < 10:
+                continue
+            temp_vector = Vectorizer.get_embedding(temp)
+            distance = VectorDistance.calculate_distance(sentence_vector, temp_vector)
+            if distance < min_distance and distance < DISTANCE_THRESHOLD:
+                min_distance = distance
+                best_sentence = temp
+
+        for i in range(len(result_sentences)):
+            result_sentences[i] = {"sentence": result_sentences[i], "matched": False}
+            if result_sentences[i]["sentence"] == best_sentence:
+                result_sentences[i]["matched"] = True    
+                
+        return StandardResponse(success=True, records=result_sentences)
+    except Exception as e:
+        logger.error(f"Text comparison failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/kgrt_api/text/mr_search", response_model=StandardResponse)
+@router.post("/knowledge/text/mr_search", response_model=StandardResponse)
+async def mr_search_text_content(request: TextMatchRequest):
+    try:
+        # Initialize the service
+        trunks_service = TrunksService()
+        
+        # Vectorize the text and search for similar content
+        search_results = trunks_service.search_by_vector(
+            text=request.text,
+            limit=10,
+            type="mr"
+        )
 
-    # Initialize TextSimilarityFinder and load the corpus
-    similarity_finder = TextSimilarityFinder(method='tfidf', use_jieba=True)
-    similarity_finder.load_corpus(trunk_texts, trunk_ids)
-
-    similar_results = similarity_finder.find_most_similar(search_text, top_n=1)
-    prop_result = {}
-    # Process search results
-    if similar_results and similar_results[0]['similarity'] >= 0.3:  # similarity threshold
-        # trunk_id of the most similar text
-        trunk_id = similar_results[0]['path']
-
-        # Fetch the trunk's details from the local cache
-        trunk_info = trunk_details.get(trunk_id)
+        # Process search results
+        records = []
+        for result in search_results:
+            distance = result.get("distance", DISTANCE_THRESHOLD)
+            if distance >= DISTANCE_THRESHOLD:
+                continue
 
-        if trunk_info:
-            search_result = {
-                **trunk_info,
-                'distance': similar_results[0]['similarity']  # treat similarity as distance
+            # Add to the records list
+            record = {
+                "content": result["content"],
+                "file_path": result.get("file_path", ""),
+                "title": result.get("title", ""),
+                "distance": distance,
             }
+            records.append(record)
 
-            reference, _ = _process_search_result(search_result, 1)
-            prop_result["references"] = [reference]
-            prop_result["answer"] = [{
-                 "sentence": request.search_text,
-                 "index": "1"
-            }]
-    else:
-        # Whole-text search found no match; fall back to sentence-level search
-        sentences = SentenceUtil.split_text(request.search_text, 10)
-        result_sentences, references = _process_sentence_search_keywords(
-            sentences, trunks_service,keywords=request.keywords
-        )
-        if references:
-            prop_result["references"] = references
-        if result_sentences:
-            prop_result["answer"] = result_sentences
-    return StandardResponse(success=True,data=prop_result)
+        # Assemble the response payload
+        response_data = {
+            "records": records
+        }
+        
+        return StandardResponse(success=True, data=response_data)
+        
+    except Exception as e:
+        logger.error(f"Mr search failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.post("/kgrt_api/text/mr_match", response_model=StandardResponse)
+@router.post("/knowledge/text/mr_match", response_model=StandardResponse)
+async def compare_text(request: TextCompareMultiRequest):
+    start_time = time.time()
+    try:
+        # Split both texts into sentences
+        origin_sentences = SentenceUtil.split_text(request.origin)
+        similar_sentences = SentenceUtil.split_text(request.similar)
+        end_time = time.time()
+        logger.info(f"mr_match接口处理文本耗时: {(end_time - start_time) * 1000:.2f}ms")
+        
+        # Initialize the result list
+        origin_results = []
+        
+        # Filter out short sentences and precompute vectors
+        valid_origin_sentences = [(sent, len(sent) >= 10) for sent in origin_sentences]
+        valid_similar_sentences = [(sent, len(sent) >= 10) for sent in similar_sentences]
+        
+        # Initialize similar_results with matched=False for every sentence
+        similar_results = [{"sentence": sent, "matched": False} for sent, _ in valid_similar_sentences]
+        
+        # Batch-fetch embeddings
+        origin_vectors = {}
+        similar_vectors = {}
+        origin_batch = [sent for sent, is_valid in valid_origin_sentences if is_valid]
+        similar_batch = [sent for sent, is_valid in valid_similar_sentences if is_valid]
+        
+        if origin_batch:
+            origin_embeddings = [Vectorizer.get_embedding(sent) for sent in origin_batch]
+            origin_vectors = dict(zip(origin_batch, origin_embeddings))
+        
+        if similar_batch:
+            similar_embeddings = [Vectorizer.get_embedding(sent) for sent in similar_batch]
+            similar_vectors = dict(zip(similar_batch, similar_embeddings))
+
+        end_time = time.time()
+        logger.info(f"mr_match接口处理向量耗时: {(end_time - start_time) * 1000:.2f}ms") 
+        # 处理origin文本
+        for origin_sent, is_valid in valid_origin_sentences:
+            if not is_valid:
+                origin_results.append({"sentence": origin_sent, "matched": False})
+                continue
+            
+            origin_vector = origin_vectors[origin_sent]
+            matched = False
+            
+            # Similarity scan that skips already-matched sentences
+            for i, similar_result in enumerate(similar_results):
+                if similar_result["matched"]:
+                    continue
+                    
+                similar_sent = similar_result["sentence"]
+                if len(similar_sent) < 10:
+                    continue
+                    
+                similar_vector = similar_vectors.get(similar_sent)
+                if not similar_vector:
+                    continue
+                    
+                distance = VectorDistance.calculate_distance(origin_vector, similar_vector)
+                if distance < DISTANCE_THRESHOLD:
+                    matched = True
+                    similar_results[i]["matched"] = True
+                    break
+            
+            origin_results.append({"sentence": origin_sent, "matched": matched})
+        
+        response_data = {
+            "origin": origin_results,
+            "similar": similar_results
+        }
+        
+        end_time = time.time()
+        logger.info(f"mr_match接口耗时: {(end_time - start_time) * 1000:.2f}ms")
+        return StandardResponse(success=True, data=response_data)
+    except Exception as e:
+        end_time = time.time()
+        logger.error(f"Text comparison failed: {str(e)}")
+        logger.info(f"mr_match接口耗时: {(end_time - start_time) * 1000:.2f}ms")
+        raise HTTPException(status_code=500, detail=str(e))
+
+def _check_cache(node_id: int) -> Optional[dict]:
+    """检查并返回缓存结果"""
+    cache_key = f"xunzheng_{node_id}"
+    cached_result = cache.get(cache_key)
+    if cached_result:
+        logger.info(f"从缓存获取结果,node_id: {node_id}")
+        return cached_result
+    return None
+
+def _get_node_info(node_service: KGNodeService, node_id: int) -> dict:
+    """获取并验证节点信息"""
+    node = node_service.get_node(node_id)
+    if not node:
+        raise ValueError(f"节点不存在: {node_id}")
+    return {
+        "id": node_id,
+        "name": node.get('name', ''),
+        "category": node.get('category', ''),
+        "props": [],
+        "files": [],
+        "distance": 0
+    }
 
 def _process_search_result(search_result: dict, reference_index: int) -> tuple[dict, str]:
     """处理搜索结果,返回引用信息和文件名"""
@@ -123,27 +392,45 @@ def _process_search_result(search_result: dict, reference_index: int) -> tuple[d
     }
     return reference, file_name
 
-
-def _process_sentence_search(node_name: str, prop_title: str, sentences: list, trunks_service: TrunksService) -> tuple[
-    list, list]:
+def _get_file_type(file_name: str) -> str:
+    """根据文件名确定文件类型"""
+    file_name_lower = file_name.lower()
+    if file_name_lower.endswith(".pdf"):
+        return "pdf"
+    elif file_name_lower.endswith((".doc", ".docx")):
+        return "doc"
+    elif file_name_lower.endswith((".xls", ".xlsx")):
+        return "excel"
+    elif file_name_lower.endswith((".ppt", ".pptx")):
+        return "ppt"
+    return "other"
+
+def _process_sentence_search(node_name: str, prop_title: str, sentences: list, trunks_service: TrunksService) -> tuple[list, list]:
     keywords = [node_name, prop_title] if node_name and prop_title else None
-    return _process_sentence_search_keywords(sentences, trunks_service, keywords=keywords)
-
-
-def _process_sentence_search_keywords(sentences: list, trunks_service: TrunksService,
-                                      keywords: Optional[List[str]] = None) -> tuple[list, list]:
+    return _process_sentence_search_keywords(sentences, trunks_service, keywords=keywords)
+    
+def _process_sentence_search_keywords(sentences: list, trunks_service: TrunksService, keywords: Optional[List[str]] = None) -> tuple[list, list]:
     """处理句子搜索,返回结果句子和引用列表"""
     result_sentences = []
     all_references = []
     reference_index = 1
     i = 0
-
+    
     while i < len(sentences):
         sentence = sentences[i]
         search_text = sentence
         if keywords:
             search_text = f"{keywords}:{sentence}"
-
+        # if len(sentence) < 10 and i + 1 < len(sentences):
+        #     next_sentence = sentences[i + 1]
+        #     # result_sentences.append({"sentence": sentence, "flag": ""})
+        #     search_text = f"{node_name}:{prop_title}:{sentence} {next_sentence}"
+        #     i += 1
+        # elif len(sentence) < 10:
+        #     result_sentences.append({"sentence": sentence, "flag": ""})
+        #     i += 1
+        #     continue
+        # else:
         i += 1
 
         # Fetch similar content via vector search
@@ -180,11 +467,11 @@ def _process_sentence_search_keywords(sentences: list, trunks_service: TrunksSer
 
         # Match similarity with TextSimilarityFinder
         similar_results = similarity_finder.find_most_similar(search_text, top_n=1)
-
+        
         if not similar_results:  # similarity threshold
-            result_sentences.append({"sentence": sentence, "index": ""})
+            result_sentences.append({"sentence": sentence, "flag": ""})
             continue
-
+        
         # trunk_id of the most similar text
         trunk_id = similar_results[0]['path']
 
@@ -198,7 +485,7 @@ def _process_sentence_search_keywords(sentences: list, trunks_service: TrunksSer
             }
             # Check whether the similarity clears the threshold
             if search_result['distance'] >= DISTANCE_THRESHOLD:
-                result_sentences.append({"sentence": sentence, "index": ""})
+                result_sentences.append({"sentence": sentence, "flag": ""})
                 continue
                 # Check whether this reference already exists
             existing_ref = next((ref for ref in all_references if ref["id"] == search_result["id"]), None)
@@ -209,7 +496,382 @@ def _process_sentence_search_keywords(sentences: list, trunks_service: TrunksSer
                 all_references.append(reference)
                 reference_index += 1
 
-            result_sentences.append({"sentence": sentence, "index": str(current_index)})
-
+            result_sentences.append({"sentence": sentence, "flag": str(current_index)})
+    
     return result_sentences, all_references
+
+def _mark_symptoms(text: str, symptom_list: List[str]) -> str:
+    """处理症状标记"""
+    if not symptom_list:
+        return text
+        
+    marked_sentence = text
+    # Track which character positions have already been marked
+    marked_positions = [False] * len(marked_sentence)
+    
+    # Track symptoms that have already been processed
+    processed_symptoms = []
+    
+    for symptom in symptom_list:
+        # Skip symptoms already processed, or that contain/are contained in a processed one
+        if any(symptom in processed_sym or processed_sym in symptom for processed_sym in processed_symptoms):
+            continue
+            
+        # Find every match position
+        start_pos = 0
+        while True:
+            pos = marked_sentence.find(symptom, start_pos)
+            if pos == -1:
+                break
+                
+            # Only mark if no position in this range is marked yet
+            if not any(marked_positions[pos:pos + len(symptom)]):
+                # Mark every position in this range
+                for i in range(pos, pos + len(symptom)):
+                    marked_positions[i] = True
+                # Insert the highlight markup
+                marked_sentence = marked_sentence[:pos] + f'<i style="color:red;">{symptom}</i>' + marked_sentence[pos + len(symptom):]
+                # Record the symptom as processed
+                if symptom not in processed_symptoms:
+                    processed_symptoms.append(symptom)
+                # Grow the position array to account for the inserted tags
+                new_positions = [False] * (len('<i style="color:red;">') + len('</i>'))
+                marked_positions = marked_positions[:pos] + new_positions + marked_positions[pos:]
+            
+            start_pos = pos + len('<i style="color:red;">') + len(symptom) + len('</i>')
+            
+    return marked_sentence
+
+@router.post("/kgrt_api/text/eb_search", response_model=StandardResponse)
+@router.post("/knowledge/text/eb_search", response_model=StandardResponse)
+async def node_props_search(request: NodePropsSearchRequest, db: Session = Depends(get_db)):
+    try:
+        start_time = time.time()
+        
+        # Check the cache
+        cached_result = _check_cache(request.node_id)
+        if cached_result:
+            # If a symptom list was supplied, apply symptom highlighting
+            if request.symptoms:
+                symptom_list = []
+                try:
+                    # Initialize services
+                    node_service = KGNodeService(db)
+                    edge_service = KGEdgeService(db)
+                    
+                    for symptom in request.symptoms:
+                        # Add the original symptom
+                        symptom_list.append(symptom)
+                        try:
+                            # Fetch the symptom node
+                            symptom_node = node_service.get_node_by_name_category(symptom, '症状')
+                            # Fetch symptom synonyms (both the 1.0 and 2.0 vocabularies)
+                            for category in ['症状同义词', '症状同义词2.0']:
+                                edges = edge_service.get_edges_by_nodes(src_id=symptom_node['id'], category=category)
+                                if edges:
+                                    # Add the synonyms
+                                    for edge in edges:
+                                        if edge['dest_node'] and edge['dest_node'].get('name'):
+                                            symptom_list.append(edge['dest_node']['name'])
+                        except ValueError:
+                            # Node not found: keep only the original symptom
+                            continue
+                    
+                    # Sort by length, longest first
+                    symptom_list.sort(key=len, reverse=True)
+                    
+                    # Apply symptom highlighting to the cached result
+                    for prop in cached_result.get('props', []):
+                        if prop.get('prop_title') == '临床表现' and 'answer' in prop:
+                            for answer in prop['answer']:
+                                answer['sentence'] = _mark_symptoms(answer['sentence'], symptom_list)
+                except Exception as e:
+                    logger.error(f"处理症状标记失败: {str(e)}")
+            
+            return StandardResponse(success=True, data=cached_result)
+
+        # Initialize services
+        trunks_service = TrunksService()
+        node_service = KGNodeService(db)
+        prop_service = KGPropService(db)
+        edge_service = KGEdgeService(db)
+
+        # Fetch node info
+        result = _get_node_info(node_service, request.node_id)
+        node_name = result["name"]
+
+        # Build the symptom list
+        symptom_list = []
+        if request.symptoms:
+            for symptom in request.symptoms:
+                try:
+                    # Add the original symptom
+                    symptom_list.append(symptom)
+                    # Fetch the symptom node
+                    symptom_node = node_service.get_node_by_name_category(symptom, '症状')
+                    # Fetch symptom synonyms (both the 1.0 and 2.0 vocabularies)
+                    for category in ['症状同义词', '症状同义词2.0']:
+                        edges = edge_service.get_edges_by_nodes(src_id=symptom_node['id'], category=category)
+                        if edges:
+                            # Add the synonyms
+                            for edge in edges:
+                                if edge['dest_node'] and edge['dest_node'].get('name'):
+                                    symptom_list.append(edge['dest_node']['name'])
+                except ValueError:
+                    # Node not found: keep only the original symptom
+                    continue
+            
+            # Sort by length, longest first
+            symptom_list.sort(key=len, reverse=True)
+
+        # Look up each property in props_ids
+        for prop_id in request.props_ids:
+            prop = prop_service.get_prop_by_id(prop_id)
+            if not prop:
+                logger.warning(f"属性不存在: {prop_id}")
+                continue
+
+            prop_title = prop.get('prop_title', '')
+            prop_value = prop.get('prop_value', '')
+
+            # Build the property result object
+            prop_result = {
+                "id": prop_id,
+                "category": prop.get('category', 0),
+                "prop_name": prop.get('prop_name', ''),
+                "prop_value": prop_value,
+                "prop_title": prop_title,
+                "type": prop.get('type', 1)
+            }
+            result["props"].append(prop_result)
+
+            # Skip the search when prop_value is '无' (none)
+            if prop_value == '无':
+                prop_result["answer"] = [{
+                    "sentence": prop_value,
+                    "flag": ""
+                }]
+                continue
+
+            # First search with the full prop_value
+            search_text = f"{node_name}:{prop_title}:{prop_value}"
+            # Fetch similar content via vector search
+            search_results = trunks_service.search_by_vector(
+                text=search_text,
+                limit=500,
+                type='trunk',
+                distance=0.7
+            )
+            
+            # Prepare corpus data
+            trunk_texts = []
+            trunk_ids = []
+            
+            # Dict caching each trunk's details
+            trunk_details = {}
+            
+            for trunk in search_results:
+                trunk_texts.append(trunk.get('content'))
+                trunk_ids.append(trunk.get('id'))
+                # Cache the trunk's details
+                trunk_details[trunk.get('id')] = {
+                    'id': trunk.get('id'),
+                    'content': trunk.get('content'),
+                    'file_path': trunk.get('file_path'),
+                    'title': trunk.get('title'),
+                    'referrence': trunk.get('referrence'),
+                    'page_no': trunk.get('page_no')
+                }
+
+            if len(trunk_texts) == 0:
+                continue
+
+            # Initialize TextSimilarityFinder and load the corpus
+            similarity_finder = TextSimilarityFinder(method='tfidf', use_jieba=True)
+            similarity_finder.load_corpus(trunk_texts, trunk_ids)
+
+            similar_results = similarity_finder.find_most_similar(search_text, top_n=1)
+
+            # Process the search result
+            trunk_info = None
+            if similar_results and similar_results[0]['similarity'] >= 0.3:  # similarity threshold
+                # trunk_id of the most similar text
+                trunk_id = similar_results[0]['path']
+                # Fetch the trunk's details from the local cache
+                trunk_info = trunk_details.get(trunk_id)
+
+            if trunk_info:
+                search_result = {
+                    **trunk_info,
+                    'distance': similar_results[0]['similarity']  # treat similarity as distance
+                }
+                reference, _ = _process_search_result(search_result, 1)
+                prop_result["references"] = [reference]
+                prop_result["answer"] = [{
+                    "sentence": prop_value,
+                    "flag": "1"
+                }]
+            else:
+                # No whole-text match (or no trunk details); fall back to sentence-level search
+                sentences = SentenceUtil.split_text(prop_value, 10)
+                result_sentences, references = _process_sentence_search(
+                    node_name, prop_title, sentences, trunks_service
+                )
+                if references:
+                    prop_result["references"] = references
+                if result_sentences:
+                    prop_result["answer"] = result_sentences
+
+        # Collect file info
+        all_files = set()
+        file_index_map = {}
+        file_index = 1
+
+        # Gather files from references
+        for prop_result in result["props"]:
+            if "references" not in prop_result:
+                continue
+            for ref in prop_result["references"]:
+                referrence = ref.get("referrence", "")
+                if not (referrence and "/books/" in referrence):
+                    continue
+                file_name = referrence.split("/books/")[-1]
+                if not file_name:
+                    continue
+                file_type = _get_file_type(file_name)
+                if file_name not in file_index_map:
+                    file_index_map[file_name] = file_index
+                    file_index += 1
+                all_files.add((file_name, file_type))
+
+        # Rewrite reference indices as "<file index>-<ref index>"
+        for prop_result in result["props"]:
+            if "references" not in prop_result:
+                continue
+            for ref in prop_result["references"]:
+                referrence = ref.get("referrence", "")
+                if referrence and "/books/" in referrence:
+                    file_name = referrence.split("/books/")[-1]
+                    if file_name in file_index_map:
+                        ref["index"] = f"{file_index_map[file_name]}-{ref['index']}"
+
+            # Rewrite each answer's flag to the combined "<file index>-<ref index>" value;
+            # answers produced above carry "flag" (renamed from "index" in this commit)
+            if "answer" in prop_result:
+                for sentence in prop_result["answer"]:
+                    if sentence.get("flag"):
+                        for ref in prop_result["references"]:
+                            if ref["index"].endswith(f"-{sentence['flag']}"):
+                                sentence["flag"] = ref["index"]
+                                break
+
+        # Attach the file info to the result
+        result["files"] = sorted([{
+            "file_name": file_name,
+            "file_type": file_type,
+            "index": str(file_index_map[file_name])
+        } for file_name, file_type in all_files], key=lambda x: int(x["index"]))
+
+        # Cache the result
+        cache_key = f"xunzheng_{request.node_id}"
+        cache[cache_key] = result
+
+        # Apply symptom highlighting
+        if request.symptoms:
+            for prop in result.get('props', []):
+                if prop.get('prop_title') == '临床表现' and 'answer' in prop:
+                    for answer in prop['answer']:
+                        answer['sentence'] = _mark_symptoms(answer['sentence'], symptom_list)
+
+        end_time = time.time()
+        logger.info(f"node_props_search接口耗时: {(end_time - start_time) * 1000:.2f}ms")
+
+        return StandardResponse(success=True, data=result)
+    except Exception as e:
+        logger.error(f"Node props search failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+class FindSimilarTexts(BaseModel):
+    keywords: Optional[List[str]] = None
+    search_text: str
+
+@router.post("/knowledge/text/find_similar_texts", response_model=StandardResponse)
+async def find_similar_texts(request: FindSimilarTexts, db: Session = Depends(get_db)):
+    trunks_service = TrunksService()
+    search_text = request.search_text
+    if request.keywords:
+        search_text = f"{request.keywords}:{search_text}"
+    # Fetch similar content via vector search
+    search_results = trunks_service.search_by_vector(
+        text=search_text,
+        limit=500,
+        type='trunk',
+        distance=0.7
+    )
+
+    # Prepare corpus data
+    trunk_texts = []
+    trunk_ids = []
+
+    # Dict caching each trunk's details
+    trunk_details = {}
+
+    for trunk in search_results:
+        trunk_texts.append(trunk.get('content'))
+        trunk_ids.append(trunk.get('id'))
+        # Cache the trunk's details
+        trunk_details[trunk.get('id')] = {
+            'id': trunk.get('id'),
+            'content': trunk.get('content'),
+            'file_path': trunk.get('file_path'),
+            'title': trunk.get('title'),
+            'referrence': trunk.get('referrence'),
+            'page_no': trunk.get('page_no')
+        }
+
+    if len(trunk_texts) == 0:
+        return StandardResponse(success=True)
+
+    # Initialize TextSimilarityFinder and load the corpus
+    similarity_finder = TextSimilarityFinder(method='tfidf', use_jieba=True)
+    similarity_finder.load_corpus(trunk_texts, trunk_ids)
+
+    similar_results = similarity_finder.find_most_similar(search_text, top_n=1)
+    prop_result = {}
+    # Process search results
+    if similar_results and similar_results[0]['similarity'] >= 0.3:  # similarity threshold
+        # trunk_id of the most similar text
+        trunk_id = similar_results[0]['path']
+
+        # Fetch the trunk's details from the local cache
+        trunk_info = trunk_details.get(trunk_id)
+
+        if trunk_info:
+            search_result = {
+                **trunk_info,
+            'distance': similar_results[0]['similarity']  # treat similarity as distance
+            }
+
+            reference, _ = _process_search_result(search_result, 1)
+            prop_result["references"] = [reference]
+            prop_result["answer"] = [{
+                 "sentence": request.search_text,
+                 "flag": "1"
+            }]
+    else:
+        # Whole-text search found no match; fall back to sentence-level search
+        sentences = SentenceUtil.split_text(request.search_text, 10)
+        result_sentences, references = _process_sentence_search_keywords(
+            sentences, trunks_service, keywords=request.keywords
+        )
+        if references:
+            prop_result["references"] = references
+        if result_sentences:
+            prop_result["answer"] = result_sentences
+    return StandardResponse(success=True, data=prop_result)
+
+
 text_search_router = router
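
To exercise the reworked search endpoint end to end, a minimal sketch (base URL and input text are assumptions; TextSearchRequest accepts text, conversation_id, and need_convert):

    import requests

    resp = requests.post(
        "http://localhost:8000/knowledge/text/search",
        json={"text": "反复咳嗽、咳痰伴低热2月余。", "need_convert": False},
    )
    data = resp.json()["data"]
    print(data["answer"])      # sentences annotated with ^[n]^ citation markers
    print(data["references"])  # the cited trunks, one entry per unique id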

+ 0 - 73
src/knowledge/server.py

@@ -42,79 +42,6 @@ async def health_check():
         "service": "knowledge-graph"
     }
 
-
-@app.post("/test", operation_id="医疗知识图谱目标节点查询", summary="根据医疗知识图谱获取医疗相关信息",
-         description="""根据三元组的起始节点名称和关系名称,查询目标节点列表。
-         该接口主要用于医疗知识图谱查询场景,例如:通过输入疾病名称和相关关系类型,
-         返回该疾病对应的相关症状、治疗方法等信息。
-         典型应用场景包括:
-         - 症状查询:输入疾病名称和"疾病相关症状"关系
-         - 诊断依据查询:输入疾病名称和"诊断依据"关系
-         - 鉴别诊断查询:输入疾病名称和"疾病相关鉴别诊断"关系""",
-         response_description="""返回目标节点名称的字符串列表,格式为:
-         ["节点名称1", "节点名称2", ...]""")
-async def test(node_name: str = Query(...,
-                  description="""知识图谱三元组的起始节点名称,通常是疾病名称。
-                 示例值:感冒、高血压、糖尿病等""",
-                  example="糖尿病"),
-                node_category: str = Query(...,
-                  description="""知识图谱三元组的起始节点类型,通常是疾病。
-                 示例值:疾病、症状等""",
-                  example="疾病"),
-                relation_name: str= Query(...,
-                  description="""知识图谱三元组的关系名称,描述节点间的关系类型。
-                 常见关系类型包括:
-                 - 疾病相关症状
-                 - 诊断依据
-                 - 疾病相关鉴别诊断""",
-                  example="疾病相关症状"), 
-                db: Session = Depends(get_db)) -> list[str]:    
-    """
-    根据起始节点名称和关系名称查询目标节点名称列表
-    
-    参数:
-        node_name: 起始节点名称(通常是疾病名称)
-        relation_name: 关系类型名称
-        
-    返回:
-        目标节点名称的字符串列表,如果查询不到结果则返回空列表
-    """
-    try:
-        service = KGNodeService(db)
-        search_params = {
-            'keyword': node_name,
-            'category': node_category,
-            'pageNo': 1,
-            'limit': 1,
-            'load_props': False,
-            'distance': 0.45,
-        }
-        node_list = service.paginated_search(search_params)
-        edge_service = KGEdgeService(db)
-        
-        results = []
-        if node_list and node_list.get('records'):
-            first_node = node_list['records'][0]
-            src_id = first_node['id']
-            edges = edge_service.get_edges_by_nodes(src_id=src_id, dest_id=None,
-                                                  name=relation_name)
-           
-            for edge in edges:
-                dest_node = edge['dest_node']
-                results.append(dest_node['name'])
-        return results
-    except Exception as e:
-        logger.exception(f"分页查询失败: {str(e)}")
-        raise HTTPException(
-            status_code=500,
-            detail=StandardResponse(
-                success=False,
-                error_code=500,
-                error_msg=str(e)
-            )
-        )
-
-
 async def init_setup():
     """初始化项目配置"""
 

+ 40 - 35
src/knowledge/service/kg_graph_service.py

@@ -110,16 +110,18 @@ class KgGraphService:
         # Handle the center node
         graph_dto = res[0]
         g_node_dto = {
-            'name': graph_dto['name'],
-            'category': 0,
-            'label': graph_dto['label'],
-            'id': node_id,
-            'symbol': 'circle',
-            'size': 50,
-            'properties': graph_dto['properties'],
-            'kg_id': graph_dto['id'],
-            'itemStyle': item_style_map
+            "label": graph_dto["name"],
+            'type': graph_dto["label"],
+            "category": 0,
+            "name": "0",
+            # "id": graph_dto["id"],
+            "symbol": "circle",
+            "symbolSize": 50,
+            "properties": graph_dto["properties"],
+            "nodeId": graph_dto["id"],
+            "itemStyle": {"display": True}
         }
+
         node_id += 1
         graph_data['node'].append(g_node_dto)
         
@@ -135,24 +137,25 @@ class KgGraphService:
                 
                 # Add the relation node
                 n_node_dto = {
-                    'name': "",
-                    'category': 1,
-                    'label': graph_dto['label'],
-                    'id': node_id,
-                    'symbol': 'diamond',
-                    'size': 10,
-                    'properties': graph_dto['properties'],
-                    'kg_id': graph_dto['id'],
-                    'itemStyle': item_style_map
+                    "label": "",
+                    'type': graph_dto["label"],
+                    "category": 1,
+                    "name": node_id,
+                    # "id": len(nodes),
+                    "symbol": "diamond",
+                    "symbolSize": 10,
+                    "properties": graph_dto["properties"],
+                    "nodeId": graph_dto["label"],
+                    "itemStyle": item_style_map
                 }
                 graph_data['node'].append(n_node_dto)
                 
                 # Add the relation link
                 graph_data['links'].append({
-                    'source': g_node_dto['name'],
-                    'target': n_node_dto['name'],
-                    'label': r_type,
-                    'category': r_type
+                    'source': str(g_node_dto['name']),
+                    'target': str(n_node_dto['name']),
+                    'value': r_type,
+                    'relationShipType': r_type
                 })
                 
                 node_id += 1
@@ -166,25 +169,27 @@ class KgGraphService:
                     children_item_style_map["display"] = False
                 
                 e_node_dto = {
-                    'name': target['name'],
-                    'category': c_map[r_type],
-                    'label': target['label'],
-                    'id': node_id,
-                    'symbol': symbol,
-                    'size': 28,
-                    'properties': target['properties'],
-                    'kg_id': target['id'],
-                    'itemStyle': children_item_style_map
+                    "label": target["name"],
+                    "type": graph_dto["label"],
+                    "category": c_map[r_type],
+                    "name": node_id,
+                    # "id": e_node["Id"],
+                    "symbol": "circle",
+                    "symbolSize": 28,
+                    "properties": target["properties"],
+                    "nodeId": target["id"],
+                    "itemStyle": children_item_style_map
                 }
+
                 node_id += 1
                 graph_data['node'].append(e_node_dto)
                 
                 # Add the target-node link
                 graph_data['links'].append({
-                    'source': n_node_dto['name'],
-                    'target': e_node_dto['name'],
-                    'label': "",
-                    'category': r_type
+                    'source': str(n_node_dto['name']),
+                    'target': str(e_node_dto['name']),
+                    'value': "",
+                    'relationShipType': r_type
                 })
         
         return graph_data
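
The renamed keys (label, type, name, symbolSize, nodeId, value, relationShipType) look tailored to an ECharts graph series; a minimal consumer sketch under that assumption (the option wrapper itself is hypothetical, not part of this service):

    def to_echarts_option(graph_data: dict) -> dict:
        # Node entries already carry label/name/symbol/symbolSize/itemStyle;
        # links already carry stringified source/target plus value.
        return {
            "series": [{
                "type": "graph",
                "layout": "force",
                "data": graph_data["node"],
                "links": graph_data["links"],
            }]
        }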

+ 233 - 0
src/knowledge/utils/json_to_text.py

@@ -0,0 +1,233 @@
+import json
+
+class JsonToText:
+    def convert(self, json_data):
+        output = []
+        output.append(f"年龄:{json_data['age']}岁")
+        output.append(f"性别:{'女' if json_data['sex'] == 2 else '男'}")
+        output.append(f"职业:{json_data['doctor']['professionalTitle']}")
+        output.append(f"科室:{json_data['dept'][0]['name']}")
+        output.append("\n详细信息")
+        output.append(f"主诉:{json_data['chief']}")
+        output.append(f"现病史:{json_data['symptom']}")
+        output.append(f"查体:{json_data['vital']}")
+        output.append(f"既往史:{json_data['pasts']}")
+        output.append(f"婚姻状况:{json_data['marital']}")
+        output.append(f"个人史:{json_data['personal']}")
+        output.append(f"家族史:{json_data['family']}")
+        output.append(f"月经史:{json_data['menstrual']}")
+        output.append(f"疾病名称:{json_data['diseaseName']['name']}")
+        output.append("其他指数:无")
+        output.append(f"手术名称:{json_data['operationName']['name']}")
+        output.append("传染性:无")
+        output.append("手术记录:无")
+        output.append(f"过敏史:{json_data['allergy'] or '无'}")
+        output.append("疫苗接种:无")
+        output.append("其他:无")
+        output.append("检验申请单:无")
+        output.append("影像申请单:无")
+        output.append("诊断申请单:无")
+        output.append("用药申请单:无")
+        output.append("检验结果:无")
+        output.append("影像结果:无")
+        output.append("诊断结果:无")
+        output.append("用药记录:无")
+        output.append("输血记录:无")
+        output.append("\n科室信息")
+        output.append(f"科室名称:{json_data['dept'][0]['name']}")
+        output.append(f"唯一名称:{json_data['dept'][0]['uniqueName']}")
+        return '\n'.join(output)
+
+class JsonToTextConverter:
+    """Thin wrapper that accepts a JSON string instead of a parsed dict."""
+
+    def convert(self, json_str):
+        json_data = json.loads(json_str)
+        return JsonToText().convert(json_data)
+
+
+
+if __name__ == '__main__':
+    json_data = {
+        "hospitalId": -1,
+        "age": "28",
+        "sex": 2,
+        "doctor": {
+            "professionalTitle": "付医生"
+        },
+        "chief": "反复咳嗽、咳痰伴低热2月余,加重伴夜间盗汗1周。",
+        "symptom": "2小时前无诱因下出现持续性上腹部绞痛,剧痛难忍,伴恶心慢性,无呕吐,无大小便异常,曾至当地卫生院就诊,查血常规提示:血小板计数5*10^9/L",
+        "vital": "神清,急性病容,皮肤巩膜黄软,心肺无殊,腹平软,上腹部压痛明显,无反跳痛",
+        "pasts": "既往有胆总管结石,既往青霉素过敏",
+        "marriage": "",
+        "personal": "不饮酒,不抽烟",
+        "family": "不详",
+        "marital": "未婚未育",
+        "menstrual": "末次月经2020-12-23,月经期第二天",
+        "diseaseName": {
+            "dateValue": "",
+            "name": "胆囊结石伴有急性胆囊炎",
+            "uniqueName": ""
+        },
+        "otherIndex": {},
+        "operationName": {
+            "dateValue": "2020-12-24 17:39:20",
+            "name": "经皮肝穿刺引流术",
+            "uniqueName": "经皮肝穿刺引流术"
+        },
+        "infectious": "",
+        "operation": [],
+        "allergy": "",
+        "vaccination": "",
+        "other": "",
+        "lisString": "",
+        "pacsString": "",
+        "diagString": "",
+        "drugString": "",
+        "lis": [],
+        "pacs": [],
+        "diag": [
+            {
+                "dateValue": "",
+                "name": "胆囊结石伴有急性胆囊炎",
+                "uniqueName": ""
+            }
+        ],
+        "lisOrder": [],
+        "pacsOrder": [
+            {
+                "uniqueName": "经皮肝穿刺胆管造影",
+                "detailName": "经皮肝穿刺胆管造影",
+                "name": "经皮肝穿刺胆管造影",
+                "dateValue": "2020-12-24 17:33:52",
+                "time": "2020-12-24 17:33:52",
+                "check": True
+            }
+        ],
+        "diagOrder": [],
+        "drugOrder": [
+            {
+                "uniqueName": "利多卡因",
+                "detailName": "利多卡因",
+                "name": "利多卡因注射剂",
+                "flg": 5,
+                "time": "2020-12-24 17:37:27",
+                "dateValue": "2020-12-24 17:37:27",
+                "selectShow": False,
+                "check": True,
+                "form": "注射剂",
+                "selectVal": "1"
+            },
+            {
+                "uniqueName": "青霉素",
+                "detailName": "青霉素",
+                "name": "青霉素注射剂",
+                "flg": 5,
+                "time": "2020-12-24 17:40:08",
+                "dateValue": "2020-12-24 17:40:08",
+                "selectShow": False,
+                "check": True,
+                "form": "注射剂",
+                "selectVal": "1"
+            }
+        ],
+        "operationOrder": [
+            {
+                "uniqueName": "经皮肝穿刺引流术",
+                "detailName": "经皮肝穿刺引流术",
+                "name": "经皮肝穿刺引流术",
+                "flg": 6,
+                "time": "2020-12-24 17:39:20",
+                "dateValue": "2020-12-24 17:39:20",
+                "hasTreat": 1,
+                "check": True
+            }
+        ],
+        "otherOrder": [],
+        "drug": [],
+        "transfusion": [],
+        "transfusionOrder": [],
+        "dept": [
+            {
+                "name": "全科",
+                "uniqueName": "全科"
+            }
+        ]
+    }
+
+    print(JsonToText().convert(json_data))
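
The __main__ block above exercises JsonToText with a parsed dict; the string-accepting wrapper can be checked the same way by round-tripping through json.dumps (ensure_ascii=False keeps the Chinese field values readable):

    converter = JsonToTextConverter()
    print(converter.convert(json.dumps(json_data, ensure_ascii=False)))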