
Field standardization changes - Taizhou Shao Yifu (Run Run Shaw) Hospital into Beilun

rengb 4 years ago
parent
commit
2db614d215

+ 88 - 0
kernel/src/main/java/com/lantone/qc/kernel/structure/ai/ConsultationAI.java

@@ -0,0 +1,88 @@
+package com.lantone.qc.kernel.structure.ai;
+
+import com.alibaba.fastjson.JSONArray;
+import com.alibaba.fastjson.JSONObject;
+import com.google.common.collect.Lists;
+import com.lantone.qc.kernel.client.CRFServiceClient;
+import com.lantone.qc.kernel.structure.ai.process.EntityProcessDrug;
+import com.lantone.qc.kernel.util.CatalogueUtil;
+import com.lantone.qc.pub.model.InputInfo;
+import com.lantone.qc.pub.model.doc.consultation.ConsultationDoc;
+import com.lantone.qc.pub.model.doc.consultation.ConsultationResultsDoc;
+import com.lantone.qc.pub.model.entity.Drug;
+import com.lantone.qc.pub.model.label.ConsultationResultLabel;
+import com.lantone.qc.pub.util.StringUtil;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+/**
+ * @ClassName : ConsultationAI
+ * @Description : Extracts drug entities from consultation opinions (会诊意见) via the CRF service
+ * @Author : 胡敬
+ * @Date: 2020-09-01 16:40
+ */
+public class ConsultationAI extends ModelAI {
+    /** medical_text_type value sent to the CRF service for consultation texts */
+    public static List<String> medicalTextType = Arrays.asList("CourseRecordSRR");
+    public static String entityRelationObject = "entity_relation_object";
+    public static String outputs = "outputs";
+    public static String content = "content";
+    public static List<String> mapKey = Lists.newArrayList("会诊意见");
+
+    public void medrec(InputInfo inputInfo, CRFServiceClient crfServiceClient) {
+        JSONArray crfContent = new JSONArray();
+        List<ConsultationDoc> docs = inputInfo.getConsultationDocs();
+        List<ConsultationResultsDoc> resultsDocs = docs.stream().map(ConsultationDoc::getConsultationResultsDoc).filter(Objects::nonNull).collect(Collectors.toList());
+        for (int i = 0; i < resultsDocs.size(); i++) {
+            Map<String, String> structureMap = resultsDocs.get(i).getStructureMap();
+            //consultation opinion text from which drugs are extracted
+            String content = CatalogueUtil.structureMapJoin(structureMap, mapKey);
+            putContent(crfContent, medicalTextType.get(0), content, i + "药物");
+        }
+
+        JSONObject midData = loadAI(inputInfo.isUseCrfCache(), inputInfo.getMedicalRecordInfoDoc().getStructureMap().get("behospitalCode"), crfContent, crfServiceClient);//data returned by the CRF service
+
+        for (int i = 0; i < resultsDocs.size(); i++) {
+            if (midData.get(i + "药物") == null) {
+                continue;
+            }
+            ConsultationResultsDoc resultsDoc = resultsDocs.get(i);
+            ConsultationResultLabel consultationResultLabel = new ConsultationResultLabel();
+            List<Drug> drugs = putDrugCrfData(midData.getJSONObject(i + "药物"));
+            consultationResultLabel.setDrugs(drugs);
+            resultsDoc.setConsultationResultLabel(consultationResultLabel);
+        }
+    }
+
+
+    /**
+     * Extract the drugs captured by the CRF model for one consultation result document
+     *
+     * @param jsonObject CRF output for that document
+     * @return list of extracted drugs
+     */
+    public List<Drug> putDrugCrfData(JSONObject jsonObject) {
+        JSONObject aiOut = loadEntity(jsonObject, entityRelationObject, outputs, content);
+        if (aiOut == null) {
+            return new ArrayList<>();
+        }
+        EntityProcessDrug entityProcessDrug = new EntityProcessDrug();
+        List<Drug> drugs = entityProcessDrug.extractEntity(aiOut);
+        return drugs;
+    }
+
+    protected void putContent(JSONArray crfContent, String medicalTextType, String text, String sign) {
+        String move_text = CatalogueUtil.removeSpecialChar(text);
+        if (StringUtil.isEmpty(move_text)) {
+            return;
+        }
+        JSONObject detailContent = new JSONObject();
+        detailContent.put("medical_text_type", medicalTextType);
+        detailContent.put("content", move_text);
+        detailContent.put("detail_title", sign);
+        detailContent.put("originalText", text);
+        crfContent.add(detailContent);
+    }
+}
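
Note: a minimal caller sketch for the new class, assuming the surrounding QC pipeline already provides a populated InputInfo and a CRFServiceClient, and that the model classes expose Lombok-style getters mirroring the setters used above (getConsultationResultLabel, getDrugs, getName); the helper name is hypothetical.

    import com.lantone.qc.kernel.client.CRFServiceClient;
    import com.lantone.qc.kernel.structure.ai.ConsultationAI;
    import com.lantone.qc.pub.model.InputInfo;
    import com.lantone.qc.pub.model.doc.consultation.ConsultationDoc;
    import com.lantone.qc.pub.model.doc.consultation.ConsultationResultsDoc;

    public class ConsultationAIUsageSketch {
        // hypothetical helper; inputInfo and crfServiceClient come from the existing pipeline
        static void labelConsultationDrugs(InputInfo inputInfo, CRFServiceClient crfServiceClient) {
            new ConsultationAI().medrec(inputInfo, crfServiceClient);
            for (ConsultationDoc doc : inputInfo.getConsultationDocs()) {
                ConsultationResultsDoc resultsDoc = doc.getConsultationResultsDoc();
                if (resultsDoc == null || resultsDoc.getConsultationResultLabel() == null) {
                    continue;  // no consultation opinion text, or the CRF service returned nothing
                }
                // drugs extracted from the 会诊意见 text by EntityProcessDrug
                resultsDoc.getConsultationResultLabel().getDrugs()
                        .forEach(drug -> System.out.println(drug.getName()));
            }
        }
    }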

+ 205 - 0
kernel/src/main/java/com/lantone/qc/kernel/structure/ai/model/ConflictFinder.java

@@ -0,0 +1,205 @@
+package com.lantone.qc.kernel.structure.ai.model;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * @Description: Find conflicts (e.g. negation mismatches) between the specialist examination and the history of present illness
+ * @Author: HUJING
+ * @Date: 2020/8/2 15:04
+ */
+public class ConflictFinder {
+
+    private static Set<String> ignoreWords;
+    private static Set<String> negativeWords;
+    private static Map<String, String> conflictWordPairMap;
+
+    static {
+        ignoreWords = Sets.newHashSet("部", "侧");
+        negativeWords = Sets.newHashSet("无", "无殊", "否认", "不伴", "未找到",
+                "未萌出", "未见", "未", "不", "不伴有", "非", "不明显", "无明显");
+        conflictWordPairMap = Maps.newHashMap();
+        conflictWordPairMap.put("可", "差");
+        conflictWordPairMap.put("尚可", "差");
+    }
+
+
+    /**
+     * Search and pair entity blocks that share at least one character
+     *
+     * @param invertedIndexTableCheck   inverted index of the specialist examination
+     * @param invertedIndexTablePresent inverted index of the history of present illness
+     * @return list of entity block pairs
+     */
+    public List<EntityBlock[]> match(Map<String, Set<EntityBlock>> invertedIndexTableCheck,
+                                     Map<String, Set<EntityBlock>> invertedIndexTablePresent) {
+
+        List<EntityBlock[]> entityBlockPairs = Lists.newArrayList();
+        Set<String> existedIdPairs = Sets.newHashSet();  // avoid duplicates when several shared characters hit the same pair of entity blocks
+        invertedIndexTableCheck.forEach((char_, entityBlockSetCheck) -> {
+            invertedIndexTablePresent.getOrDefault(char_, Sets.newHashSet()).forEach(entityBlockPresent -> {
+                entityBlockSetCheck.forEach(entityBlockCheck -> {
+                    String idPair = "" + entityBlockCheck.getId() + "_" + entityBlockPresent.getId();
+                    if (!existedIdPairs.contains(idPair)) {
+                        entityBlockPairs.add(new EntityBlock[] { entityBlockCheck, entityBlockPresent });
+
+                        existedIdPairs.add(idPair);
+                    }
+                });
+            });
+        });
+
+        return entityBlockPairs;
+    }
+
+    /**
+     * Sub-words of string 1 that do not appear in string 2 (not strictly rigorous, but good enough here)
+     *
+     * @param word1 string 1
+     * @param word2 string 2
+     * @return list of sub-words
+     */
+    private List<String> subWordsInWord1NotInWord2(String word1, String word2) {
+
+        StringBuilder inWord1NotInWord2 = new StringBuilder();
+        Map<String, Integer> word2Map = Maps.newHashMap();
+        for (int i = 0; i < word2.length(); i++) {
+            word2Map.put(word2.substring(i, i + 1), 1);
+        }
+
+        for (int i = 0; i < word1.length(); i++) {
+            String char_ = word1.substring(i, i + 1).trim();
+            if (!"".equals(char_) && !ignoreWords.contains(char_)) {
+                if (!word2Map.containsKey(char_)) {
+                    inWord1NotInWord2.append(char_);
+                } else {
+                    inWord1NotInWord2.append("_s_");  // _s_ serves as a separator
+                }
+            }
+        }
+        List<String> subWords = Lists.newArrayList();
+        for (String subWord : inWord1NotInWord2.toString().split("_s_")) {
+            if (!"".equals(subWord.trim())) {
+                subWords.add(subWord.trim());
+            }
+
+        }
+        return subWords;
+    }
+
+    /**
+     * The two words differ only by a single negative word
+     *
+     * @param word_1 word 1
+     * @param word_2 word 2
+     * @return true if they conflict, false otherwise
+     */
+    private boolean onlyLackOfNegativeWord(String word_1, String word_2) {
+
+        List<String> subWords_1 = subWordsInWord1NotInWord2(word_1, word_2);
+        List<String> subWords_2 = subWordsInWord1NotInWord2(word_2, word_1);
+        if (subWords_1.size() == 1 && subWords_2.size() == 0) {
+            return negativeWords.contains(subWords_1.get(0));
+        }
+        if (subWords_2.size() == 1 && subWords_1.size() == 0) {
+            return negativeWords.contains(subWords_2.get(0));
+        }
+
+        return false;
+    }
+
+    /**
+     * The two words differ only by a pair of conflicting words
+     *
+     * @param word_1 word 1
+     * @param word_2 word 2
+     * @return true if they conflict, false otherwise
+     */
+    private boolean onlyDifferentWithConflictWords(String word_1, String word_2) {
+
+        List<String> subWords_1 = subWordsInWord1NotInWord2(word_1, word_2);
+        List<String> subWords_2 = subWordsInWord1NotInWord2(word_2, word_1);
+        if (subWords_1.size() == 1 && subWords_2.size() == 1) {
+            return conflictWordPairMap.getOrDefault(subWords_1.get(0), "").equals(subWords_2.get(0)) ||
+                    conflictWordPairMap.getOrDefault(subWords_2.get(0), "").equals(subWords_1.get(0));
+
+        }
+        return false;
+    }
+
+    /**
+     * Whether two entity blocks conflict
+     *
+     * @param entityBlock_1 entity block 1
+     * @param entityBlock_2 entity block 2
+     * @return true if they conflict, false otherwise
+     */
+    public boolean isConflict(EntityBlock entityBlock_1, EntityBlock entityBlock_2) {
+
+        StringBuilder word1Builder = new StringBuilder();
+        entityBlock_1.getEntityWords().forEach(word1Builder::append);
+        String word_1 = word1Builder.toString();
+
+        StringBuilder word2Builder = new StringBuilder();
+        entityBlock_2.getEntityWords().forEach(word2Builder::append);
+        String word_2 = word2Builder.toString();
+
+        if (word_1.length() == 0 || word_2.length() == 0) {
+            return false;
+        }
+
+        return onlyLackOfNegativeWord(word_1, word_2) || onlyDifferentWithConflictWords(word_1, word_2);
+    }
+
+    /**
+     * Get the positions of the two entity blocks
+     *
+     * @param entityBlock_1 entity block 1
+     * @param entityBlock_2 entity block 2
+     * @return pair of position lists, one List<int[]> per block
+     */
+    public Object[] getPositions(EntityBlock entityBlock_1, EntityBlock entityBlock_2) {
+        return new Object[] { entityBlock_1.getPositions(), entityBlock_2.getPositions() };
+    }
+
+
+    /**
+     * Find the positions of conflicts
+     *
+     * @param checkPairs   entities and relations of the specialist examination
+     * @param presentPairs entities and relations of the history of present illness
+     * @return list of conflicting position pairs
+     */
+    public List<Object[]> findConflictPositions(Object[] checkPairs, Object[] presentPairs) {
+        List<Lemma> checkLemmas = (List<Lemma>) checkPairs[0];
+        List<Relation> checkRelations = (List<Relation>) checkPairs[1];
+
+        List<Lemma> presentLemmas = (List<Lemma>) presentPairs[0];
+        List<Relation> presentRelations = (List<Relation>) presentPairs[1];
+
+        InvertedIndexTableBuilder checkInvertedIndexTableBuilder = new InvertedIndexTableBuilder(checkLemmas, checkRelations);
+        InvertedIndexTableBuilder presentInvertedIndexTableBuilder = new InvertedIndexTableBuilder(presentLemmas, presentRelations);
+        Map<String, Set<EntityBlock>> checkInvertedIndexTable = checkInvertedIndexTableBuilder.generateInvertedIndexTablePipeline();
+        Map<String, Set<EntityBlock>> presentInvertedIndexTable = presentInvertedIndexTableBuilder.generateInvertedIndexTablePipeline();
+
+        List<EntityBlock[]> entityBlockPairs = match(checkInvertedIndexTable, presentInvertedIndexTable);
+
+        List<Object[]> conflictPositions = Lists.newArrayList();
+        entityBlockPairs.forEach(entityBlockPair -> {
+            EntityBlock entityBlock_1 = entityBlockPair[0];
+            EntityBlock entityBlock_2 = entityBlockPair[1];
+            boolean conflict = isConflict(entityBlock_1, entityBlock_2);
+            // TODO: remove this debug output
+            //System.out.println("" + conflict + ":" + entityBlock_1 + " ---> " + entityBlock_2);
+            if (conflict) {
+                conflictPositions.add(getPositions(entityBlock_1, entityBlock_2));
+                conflictPositions.add(new Object[] { entityBlock_1, entityBlock_2 });
+            }
+        });
+        return conflictPositions;
+    }
+}
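
Note: a self-contained sketch of the negation rule behind onlyLackOfNegativeWord, simplified to single characters (the class above additionally groups contiguous sub-words and skips the ignore words 部/侧); class and method names here are illustrative only.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ConflictRuleSketch {
        static final Set<String> NEGATIVE = new HashSet<>(Arrays.asList("无", "否认", "未", "不"));

        // characters of a that do not occur anywhere in b, kept in order
        static List<String> charsOnlyIn(String a, String b) {
            Set<Character> bChars = new HashSet<>();
            for (char c : b.toCharArray()) {
                bChars.add(c);
            }
            List<String> out = new ArrayList<>();
            for (char c : a.toCharArray()) {
                if (!bChars.contains(c)) {
                    out.add(String.valueOf(c));
                }
            }
            return out;
        }

        // conflict if the only difference between the two phrases is a single negation word
        static boolean onlyLacksNegation(String w1, String w2) {
            List<String> d1 = charsOnlyIn(w1, w2);
            List<String> d2 = charsOnlyIn(w2, w1);
            if (d1.size() == 1 && d2.isEmpty()) {
                return NEGATIVE.contains(d1.get(0));
            }
            if (d2.size() == 1 && d1.isEmpty()) {
                return NEGATIVE.contains(d2.get(0));
            }
            return false;
        }

        public static void main(String[] args) {
            System.out.println(onlyLacksNegation("腹部压痛", "腹部无压痛"));  // true: the texts differ only by 无
            System.out.println(onlyLacksNegation("腹部压痛", "腹部反跳痛"));  // false
        }
    }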

+ 27 - 0
kernel/src/main/java/com/lantone/qc/kernel/structure/ai/model/EntityBlock.java

@@ -0,0 +1,27 @@
+package com.lantone.qc.kernel.structure.ai.model;
+
+import lombok.Getter;
+import lombok.Setter;
+
+import java.util.List;
+
+/**
+ * @Description: A group of related entity words merged into one searchable block
+ * @Author: HUJING
+ * @Date: 2020/7/31 11:19
+ */
+@Setter
+@Getter
+public class EntityBlock {
+    private List<String> entityWords;
+    private List<String> entityTypes;
+    private List<int[]> positions;
+    private String genre;
+    private String searchWord;
+    private Integer id;
+
+    @Override
+    public String toString() {
+        return entityWords.toString().replaceAll("[\\[\\]]", "");
+    }
+}
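
Note: a tiny sketch of the data an EntityBlock carries, using the Lombok-generated setters; the values are made up for illustration, and the sketch assumes it sits in the same package as EntityBlock.

    import java.util.Arrays;

    public class EntityBlockSketch {
        public static void main(String[] args) {
            EntityBlock block = new EntityBlock();
            block.setEntityWords(Arrays.asList("腹部", "压痛"));  // merged entity words
            block.setEntityTypes(Arrays.asList("身体部位", "临床表现"));  // their entity types
            block.setPositions(Arrays.asList(new int[] { 12, 14 }, new int[] { 14, 16 }));  // character spans
            block.setSearchWord("腹部压痛");  // concatenation used for indexing
            block.setId(0);
            System.out.println(block);  // toString strips the brackets: 腹部, 压痛
        }
    }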

+ 282 - 0
kernel/src/main/java/com/lantone/qc/kernel/structure/ai/model/InvertedIndexTableBuilder.java

@@ -0,0 +1,282 @@
+package com.lantone.qc.kernel.structure.ai.model;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * @Description: Builds a character-level inverted index (character -> entity blocks) from entities and relations
+ * @Author: HUJING
+ * @Date: 2020/8/2 12:25
+ */
+public class InvertedIndexTableBuilder {
+    private static Set<String> selectedLonelyEntityTypes;  // selected lonely (relation-less) entity types
+    private static Set<String> selectedRelationTypes;  // selected relation types
+    // selected multi-step relation patterns, e.g. 身体部位->临床表现<-否定 (body part -> clinical finding <- negation)
+    private static Set<Set<String>> selectedMultiStepRelationTypes;
+
+    private Map<Integer, Lemma> idEntityMap;
+    private List<Relation> relations;
+    private List<Lemma> lonelyEntityList;
+
+    private static Set<String> removeIndexWords; // search characters excluded from the index
+
+    static {  // adjust here when extending this module beyond the present-illness vs. specialist-examination comparison
+        selectedLonelyEntityTypes = Sets.newHashSet("临床表现", "修饰");
+        selectedRelationTypes = Sets.newHashSet("身体部位-临床表现",
+                "一般情况-一般情况描述", "否定-临床表现");
+        selectedMultiStepRelationTypes = Sets.newHashSet(Sets.newHashSet());  // not used yet
+
+        removeIndexWords = Sets.newHashSet();
+
+        for (int i = 0; i < 10; i++) {  // digits
+            removeIndexWords.add(String.valueOf(i));
+        }
+
+        for (int i = 'a'; i <= 'z'; i++) {  // English letters
+            removeIndexWords.add(String.valueOf((char) i));
+            removeIndexWords.add(String.valueOf((char) i).toUpperCase());
+        }
+
+        List<String> punctuations = Lists.newArrayList();//punctuation marks
+        List<String> positionWords = Lists.newArrayList();//direction/position words
+        List<String> emrWords = Lists.newArrayList("无");//words that carry no information in medical records
+
+        List<String> words = new ArrayList<>();
+        words.addAll(emrWords);
+        words.addAll(punctuations);
+        words.addAll(positionWords);
+
+        removeIndexWords.addAll(words);
+    }
+
+    public InvertedIndexTableBuilder(List<Lemma> lemmas, List<Relation> relations) {
+        idEntityMap = Maps.newHashMap();
+        for (Lemma lemma : lemmas) {
+            idEntityMap.put(lemma.getId(), lemma);
+        }
+
+        this.relations = relations;
+
+    }
+
+    /**
+     * 过滤实体和关系
+     */
+    private void filterEntitiesAndRelations() {
+
+        // entity removal rule: drop entities that are not in any relation and whose type is not among the selected lonely entity types
+        Map<Integer, Lemma> newIdEntityMap = Maps.newHashMap();
+        idEntityMap.forEach((id, lemma) -> {
+            if (selectedLonelyEntityTypes.contains(lemma.getProperty())) {
+                newIdEntityMap.put(id, lemma);
+            }
+        });
+
+        List<Relation> filteredRelations = Lists.newArrayList();
+        Set<Integer> entityIdInRelationSet = Sets.newHashSet();
+        for (Relation relation : relations) {
+            Integer fromId = relation.getFrom();
+            Integer toId = relation.getTo();
+            if (selectedRelationTypes.contains(relation.getRelationName())) {
+                filteredRelations.add(relation);
+                newIdEntityMap.put(fromId, idEntityMap.get(fromId));
+                newIdEntityMap.put(toId, idEntityMap.get(toId));
+            }
+
+            entityIdInRelationSet.add(fromId);
+            entityIdInRelationSet.add(toId);
+        }
+
+        this.idEntityMap = newIdEntityMap;
+        this.relations = filteredRelations;
+        this.lonelyEntityList = Lists.newArrayList();
+        this.idEntityMap.forEach((id, lemma) -> {
+            if (!entityIdInRelationSet.contains(id)) { // entities that appear in no relation are lonely entities
+                this.lonelyEntityList.add(lemma);
+            }
+        });
+
+    }
+
+    /**
+     * Group entity id pairs that are connected through relations
+     *
+     * @param idPairs entity id pairs
+     * @return groups of entity ids
+     */
+    public static List<List<Integer>> groupIdPairs(List<Integer[]> idPairs) {
+
+        Map<Integer, Set<Integer>> idRelatedIdsMap = Maps.newHashMap();
+        for (Integer[] idPair : idPairs) {
+            if (idPair.length == 2) {
+                Integer fromId = idPair[0];
+                Integer toId = idPair[1];
+                Set<Integer> fromIds = idRelatedIdsMap.getOrDefault(fromId, Sets.newHashSet());
+                Set<Integer> toIds = idRelatedIdsMap.getOrDefault(toId, Sets.newHashSet());
+                fromIds.addAll(toIds);
+                fromIds.add(fromId);
+                fromIds.add(toId);
+                fromIds.forEach(id -> {  // update the set of ids associated with each id in the group
+                    idRelatedIdsMap.put(id, fromIds);
+                });
+
+            }
+        }
+
+        List<List<Integer>> idGroups = Lists.newArrayList();
+        Set<Integer> usedIds = Sets.newHashSet();
+
+        idRelatedIdsMap.forEach((id, ids) -> {
+            if (!usedIds.contains(id)) {
+                List<Integer> idsList = Lists.newArrayList();
+                idsList.addAll(ids);
+                idsList.sort(Integer::compareTo);
+                idGroups.add(idsList);
+
+                usedIds.addAll(ids);  // avoid duplicates
+            }
+        });
+
+        return idGroups;
+    }
+
+    /**
+     * Merge entities through their relations
+     *
+     * @return list of entity groups
+     */
+    private List<List<Lemma>> mergeEntitiesByRelations() {
+
+        List<List<Lemma>> entityGroups = Lists.newArrayList();
+
+        // lonely entities
+        for (Lemma lemma : lonelyEntityList) {
+            entityGroups.add(Lists.newArrayList(lemma));
+        }
+
+        List<Integer[]> idPairs = Lists.newArrayList();
+        relations.forEach(relation -> {
+            idPairs.add(new Integer[] { relation.getFrom(), relation.getTo() });
+        });
+
+        List<List<Integer>> idGroups = groupIdPairs(idPairs);
+        idGroups.forEach(idGroup -> {
+            if (idGroup.size() == 2) {
+                List<Lemma> entityGroup = Lists.newArrayList();
+                idGroup.forEach(id -> {
+                    entityGroup.add(idEntityMap.get(id));
+                });
+                entityGroups.add(entityGroup);
+            } else if (idGroup.size() == 3) {
+                // patterns will be used for matching in the future, e.g. (腹, 压痛, 反跳痛) is really 腹->压痛 and 腹->反跳痛 and should be split into two groups
+
+                List<Lemma> entityGroup = Lists.newArrayList();
+                idGroup.forEach(id -> {
+                    entityGroup.add(idEntityMap.get(id));
+                });
+                entityGroups.add(entityGroup);
+            } else {
+                // patterns will be used for matching in the future; left empty for now
+            }
+        });
+
+        return entityGroups;
+    }
+
+    /**
+     * Build a single entity block
+     *
+     * @param entityGroup entity group
+     * @param id          entity block id
+     * @return entity block
+     */
+    private EntityBlock setEntityBlock(List<Lemma> entityGroup, Integer id) {
+
+        List<String> entityWords = Lists.newArrayList();
+        List<String> entityTypes = Lists.newArrayList();
+        List<int[]> positions = Lists.newArrayList();
+        entityGroup.forEach(lemma -> {
+            entityWords.add(lemma.getText());
+            entityTypes.add(lemma.getProperty());
+            positions.add(new int[] { lemma.getFrom(), lemma.getTo() });
+        });
+
+        StringBuilder searchWord = new StringBuilder();
+        for (String word : entityWords) {
+            searchWord.append(word);
+        }
+
+        EntityBlock entityBlock = new EntityBlock();
+        entityBlock.setEntityWords(entityWords);
+        entityBlock.setEntityTypes(entityTypes);
+        entityBlock.setPositions(positions);
+        entityBlock.setSearchWord(searchWord.toString());
+        entityBlock.setId(id);
+
+        return entityBlock;
+    }
+
+    /**
+     * Build all entity blocks
+     *
+     * @param entityGroups list of entity groups
+     * @return list of entity blocks
+     */
+    private List<EntityBlock> setEntityBlocks(List<List<Lemma>> entityGroups) {
+
+        List<EntityBlock> entityBlocks = Lists.newArrayList();
+        for (int i = 0; i < entityGroups.size(); i++) {
+            entityBlocks.add(setEntityBlock(entityGroups.get(i), i));
+        }
+        return entityBlocks;
+    }
+
+    /**
+     * Generate the inverted index table
+     *
+     * @param entityBlocks list of entity blocks
+     * @return the inverted index table, {character : set of entity blocks}
+     */
+    private Map<String, Set<EntityBlock>> generateInvertedIndexTable(List<EntityBlock> entityBlocks) {
+
+        Map<String, Map<Integer, EntityBlock>> invertedIndexTableTemp = Maps.newHashMap();  // keyed by block id to avoid duplicates
+        entityBlocks.forEach(entityBlock -> {
+            String searchWord = entityBlock.getSearchWord();
+            for (int i = 0; i < searchWord.length(); i++) {
+                String char_ = searchWord.substring(i, i + 1);
+                Map<Integer, EntityBlock> idEntityBlockMap = invertedIndexTableTemp.getOrDefault(char_, Maps.newHashMap());
+                idEntityBlockMap.put(entityBlock.getId(), entityBlock);
+                invertedIndexTableTemp.put(char_, idEntityBlockMap);
+            }
+        });
+
+        Map<String, Set<EntityBlock>> invertedIndexTable = Maps.newHashMap();
+        invertedIndexTableTemp.forEach((char_, idEntityBlockMap) -> {
+            invertedIndexTable.put(char_, Sets.newHashSet(idEntityBlockMap.values()));
+        });
+
+        return invertedIndexTable;
+    }
+
+    /**
+     * Inverted index generation pipeline
+     *
+     * @return the inverted index table
+     */
+    public Map<String, Set<EntityBlock>> generateInvertedIndexTablePipeline() {
+        filterEntitiesAndRelations();
+        List<List<Lemma>> entityGroups = mergeEntitiesByRelations();
+        List<EntityBlock> entityBlocks = setEntityBlocks(entityGroups);
+        return generateInvertedIndexTable(entityBlocks);
+    }
+
+}
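
Note: a self-contained sketch of the character-level index that generateInvertedIndexTable produces, using plain block ids instead of EntityBlock objects and omitting the removeIndexWords filtering; any character shared between the two documents' search words yields a candidate pair for ConflictFinder.match.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class CharInvertedIndexSketch {
        public static void main(String[] args) {
            List<String> searchWords = Arrays.asList("腹部压痛", "无恶心呕吐", "精神可");

            // character -> ids of the blocks whose search word contains that character
            Map<String, Set<Integer>> index = new HashMap<>();
            for (int id = 0; id < searchWords.size(); id++) {
                String word = searchWords.get(id);
                for (int i = 0; i < word.length(); i++) {
                    index.computeIfAbsent(word.substring(i, i + 1), k -> new HashSet<>()).add(id);
                }
            }

            System.out.println(index.get("压"));  // [0]
            System.out.println(index.get("无"));  // [1]
        }
    }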

+ 89 - 0
kernel/src/main/java/com/lantone/qc/kernel/structure/ai/process/EntityProcessDrug.java

@@ -0,0 +1,89 @@
+package com.lantone.qc.kernel.structure.ai.process;
+
+import com.alibaba.fastjson.JSONObject;
+import com.lantone.qc.kernel.structure.ai.model.EntityEnum;
+import com.lantone.qc.kernel.structure.ai.model.Lemma;
+import com.lantone.qc.pub.model.entity.*;
+import org.apache.commons.beanutils.BeanUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Component;
+
+import java.util.ArrayList;
+import java.util.List;
+
+@Component
+public class EntityProcessDrug extends EntityProcess {
+    private Logger logger = LoggerFactory.getLogger(EntityProcessDrug.class);
+
+    public List<Drug> extractEntity(JSONObject aiOut) {
+        String content = aiOut.getString("content");
+        //drugs
+        List<Drug> drugs = new ArrayList<>();
+        try {
+            List<Lemma> drugLemmas = createEntityTree(aiOut, EntityEnum.DRUG.toString());
+            List<Lemma> consumptionLemmas = createEntityTree(aiOut, EntityEnum.CONSUMPTION.toString().split("-")[0]);
+            List<Lemma> usageWardRoundLemmas = createEntityTree(aiOut, EntityEnum.USAGE_WARD_ROUND.toString());
+            List<Lemma> frequencyLemmas = createEntityTree(aiOut, EntityEnum.FREQUENCY.toString());
+            List<Lemma> stopLemmas = createEntityTree(aiOut, EntityEnum.STOP.toString());
+            List<Lemma> reasonsForAntibioticLemmas = createEntityTree(aiOut, EntityEnum.REASONS_FOR_ANTIBIOTIC.toString());
+            for (Lemma lemma : drugLemmas) {
+                int lemmaPosition = Integer.parseInt(lemma.getPosition());
+                if (content.substring(Math.max(0, lemmaPosition - 10), lemmaPosition).contains("暂停")) {
+                    continue;
+                }
+                Drug drug = new Drug();
+                drug.setName(lemma.getText().replaceAll("[“”]",""));
+                drug.setConsumption(findTAfter(lemma, new Consumption(), EntityEnum.CONSUMPTION.toString().split("-")[0]));
+                drug.setUsageWardRound(findTAfter(lemma, new UsageWardRound(), EntityEnum.USAGE_WARD_ROUND.toString()));
+                drug.setFrequency(findTAfter(lemma, new Frequency(), EntityEnum.FREQUENCY.toString()));
+                drug.setStop(findTAfter(lemma, new Stop(), EntityEnum.STOP.toString()));
+                drug.setReasonsForAntibiotic(findTAfter(lemma, new ReasonsForAntibiotic(), EntityEnum.REASONS_FOR_ANTIBIOTIC.toString()));
+                //dosage
+                if (drug.getConsumption() == null) {
+                    drug.setConsumption(setDrugs(consumptionLemmas, lemma, new Consumption()));
+                }
+                //usage
+                if (drug.getUsageWardRound() == null) {
+                    drug.setUsageWardRound(setDrugs(usageWardRoundLemmas, lemma, new UsageWardRound()));
+                }
+                //frequency
+                if (drug.getFrequency() == null) {
+                    drug.setFrequency(setDrugs(frequencyLemmas, lemma, new Frequency()));
+                }
+                //discontinuation
+                if (drug.getStop() == null) {
+                    drug.setStop(setDrugs(stopLemmas, lemma, new Stop()));
+                }
+                //reason for antibiotic use
+                if (drug.getReasonsForAntibiotic() == null) {
+                    drug.setReasonsForAntibiotic(setDrugs(reasonsForAntibioticLemmas, lemma, new ReasonsForAntibiotic()));
+                }
+                drugs.add(drug);
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error(e.getMessage(), e);
+        }
+        return drugs;
+    }
+
+    /**
+     * When the model has not linked the antibiotic-related attribute to the drug directly, check
+     * whether a candidate lemma lies within 20 characters of the drug mention
+     *
+     * @param lemmas candidate attribute lemmas (dosage, usage, frequency, ...)
+     * @param lemma  the drug lemma
+     * @param t      attribute bean to fill
+     * @return the filled attribute bean, or null if no candidate is close enough
+     * @throws Exception if the bean property cannot be set
+     */
+    private <T> T setDrugs(List<Lemma> lemmas, Lemma lemma, T t) throws Exception {
+        for (Lemma lem : lemmas) {
+            if (Integer.parseInt(lem.getPosition()) - Integer.parseInt(lemma.getPosition()) <= 20) {
+                // take the candidate's own text as the attribute value
+                BeanUtils.copyProperty(t, "name", lem.getText());
+                return t;
+            }
+        }
+        return null;
+    }
+}
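
Note: a self-contained sketch of the fallback in setDrugs above: when no relation links the drug to its attribute, the nearest attribute mention within a small character window is attached instead. The sketch only looks forward from the drug mention, and the names and offsets are made up for illustration.

    import java.util.Arrays;
    import java.util.List;

    public class NearbyAttributeSketch {
        static class Mention {
            final String text;
            final int position;

            Mention(String text, int position) {
                this.text = text;
                this.position = position;
            }
        }

        // first attribute mention that starts no more than `window` characters after the drug mention
        static Mention attributeNear(Mention drug, List<Mention> attributes, int window) {
            for (Mention attr : attributes) {
                int distance = attr.position - drug.position;
                if (distance >= 0 && distance <= window) {
                    return attr;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            Mention drug = new Mention("头孢呋辛", 35);
            List<Mention> doses = Arrays.asList(new Mention("1.5g", 45), new Mention("0.5g", 120));
            System.out.println(attributeNear(drug, doses, 20).text);  // 1.5g
        }
    }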