
Merge push-lis into debug branch

louhr committed 5 years ago
Commit c3c9f8b5a4
59 files changed, 2294 insertions(+), 381 deletions(-)
  1. +130 -0    algorithm/src/main/java/org/algorithm/core/FilterRule.java
  2. +282 -0    algorithm/src/main/java/org/algorithm/core/RelationTreeUtils.java
  3. +493 -0    algorithm/src/main/java/org/algorithm/core/RuleCheckMachine.java
  4. +3 -4      algorithm/src/main/java/org/algorithm/core/cnn/AlgorithmCNNExecutor.java
  5. +22 -0     algorithm/src/main/java/org/algorithm/core/cnn/AlgorithmCNNExecutorPacs.java
  6. +1 -1      algorithm/src/main/java/org/algorithm/core/cnn/dataset/RelationExtractionDataSet.java
  7. +32 -1     algorithm/src/main/java/org/algorithm/core/cnn/entity/Lemma.java
  8. +40 -13    algorithm/src/main/java/org/algorithm/core/cnn/model/RelationExtractionEnsembleModel.java
  9. +4 -4      algorithm/src/main/java/org/algorithm/core/cnn/model/RelationExtractionModel.java
 10. +2 -1      algorithm/src/main/java/org/algorithm/core/neural/DiagnosisPredictExecutor.java
 11. +30 -7     algorithm/src/main/java/org/algorithm/core/neural/TensorFlowModelLoadFactory.java
 12. +122 -13   algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSet.java
 13. +113 -73   algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSetImpl.java
 14. +11 -1     algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSetImplNonParallel.java
 15. +33 -0     algorithm/src/main/java/org/algorithm/factory/RelationExtractionFactory.java
 16. +5 -3      algorithm/src/main/java/org/algorithm/test/ReEnsembleModelTest.java
 17. +9 -4      algorithm/src/main/java/org/algorithm/test/TensorflowExcutorTest.java
 18. +47 -40    algorithm/src/main/java/org/algorithm/test/Test.java
 19. +46 -0     algorithm/src/main/java/org/algorithm/test/TestDiagnosisFilter.java
 20. +34 -0     algorithm/src/main/java/org/algorithm/test/TestReSplit.java
 21. +15 -0     algorithm/src/main/java/org/algorithm/test/TestRelationTreeUtils.java
 22. +140 -0    algorithm/src/main/java/org/algorithm/test/TestRuleCheckMachine.java
 23. +1 -1      algorithm/src/main/java/org/algorithm/util/MysqlConnector.java
 24. +1 -1      algorithm/src/main/resources/algorithm.properties
 25. +0 -6      common-push/pom.xml
 26. +2 -0      common-push/src/main/java/org/diagbot/common/push/bean/SearchData.java
 27. +33 -0     common-push/src/main/java/org/diagbot/common/push/cache/ApplicationCacheUtil.java
 28. +14 -2     common-push/src/main/java/org/diagbot/common/push/cache/CacheFileManager.java
 29. +14 -2     common-push/src/main/java/org/diagbot/common/push/filter/ClassifyDiag.java
 30. +0 -59     common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentLis.java
 31. +0 -51     common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentPacs.java
 32. +10 -11    common-push/src/main/java/org/diagbot/common/push/filter/rule/PretreatmentRule.java
 33. +22 -0     common-push/src/main/java/org/diagbot/common/push/util/PushConstants.java
 34. +13 -21    common-push/src/main/java/org/diagbot/common/push/work/ParamsDataProxy.java
 35. +92 -0     common-push/src/main/java/org/diagbot/common/push/work/RelationExtractionUtil.java
 36. +7 -2      graph-web/src/main/java/org/diagbot/graphWeb/work/GraphCalculate.java
 37. +5 -3      graph/src/main/java/org/diagbot/graph/jdbc/Neo4jAPI.java
 38. +2 -2      graph/src/main/resources/bolt.properties
 39. +11 -2     graphdb/src/main/java/org/diagbot/repository/DiseaseRepository.java
 40. +45 -12    graphdb/src/main/java/org/diagbot/service/impl/KnowledgeServiceImpl.java
 41. +1 -1      nlp-web/src/main/java/org/diagbot/nlp/dao/xml/InfoMapper.xml
 42. +1 -1      nlp-web/src/main/resources/application.yml
 43. +3 -2      nlp/src/main/java/org/diagbot/nlp/feature/FeatureType.java
 44. +17 -0     nlp/src/main/java/org/diagbot/nlp/rule/analyze/RuleAnalyze.java
 45. +1 -1      common-push/src/main/java/org/diagbot/common/push/bean/PreResult.java
 46. +4 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/Pretreatment.java
 47. +2 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentDiag.java
 48. +86 -0     nlp/src/main/java/org/diagbot/nlp/rule/pretreat/PretreatmentLis.java
 49. +2 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentMakeList.java
 50. +2 -4      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentNormal.java
 51. +2 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentOther.java
 52. +127 -0    nlp/src/main/java/org/diagbot/nlp/rule/pretreat/PretreatmentPacs.java
 53. +2 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentSymptom.java
 54. +2 -2      common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentVital.java
 55. +1 -0      nlp/src/main/java/org/diagbot/nlp/util/Constants.java
 56. +36 -20    push-web/src/main/java/org/diagbot/push/controller/AlgorithmController.java
 57. +1 -0      push-web/src/main/java/org/diagbot/push/controller/CacheFileManagerController.java
 58. +21 -0     rule/src/main/java/org/diagbot/rule/crisis/CrisisApplication.java
 59. +97 -0     rule/src/main/java/org/diagbot/rule/lis/LisApplication.java

+ 130 - 0
algorithm/src/main/java/org/algorithm/core/FilterRule.java

@@ -0,0 +1,130 @@
+package org.algorithm.core;
+
+import java.util.Map;
+
+/**
+ * Filter rule
+ *
+ * @Author: bijl
+ * @Date: 2019/9/5 20:21
+ * @Description:
+ */
+public class FilterRule {
+
+    private Integer uuid;
+
+    private String key_1;
+    private String type_1;
+
+    private String key_2;
+    private String type_2;
+
+    private String inside;
+    private String insideType;
+
+    private String despite;
+    private String despiteInside;
+
+    public FilterRule(Map<String, String> aMap) {
+
+        this.key_1 = aMap.get("key_1");
+        this.type_1 = aMap.get("type_1");
+
+        this.key_2 = aMap.get("key_2");
+        this.type_2 = aMap.get("type_2");
+
+        this.inside = aMap.get("inside");
+        this.insideType = aMap.get("inside_type");
+
+        this.despite = aMap.get("despite");
+        this.despiteInside = aMap.get("despite_inside");
+    }
+
+    public Integer getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(Integer uuid) {
+        this.uuid = uuid;
+    }
+
+
+    public String getKey_1() {
+        return key_1;
+    }
+
+    public void setKey_1(String key_1) {
+        this.key_1 = key_1;
+    }
+
+    public String getType_1() {
+        return type_1;
+    }
+
+    public void setType_1(String type_1) {
+        this.type_1 = type_1;
+    }
+
+    public String getKey_2() {
+        return key_2;
+    }
+
+    public void setKey_2(String key_2) {
+        this.key_2 = key_2;
+    }
+
+    public String getType_2() {
+        return type_2;
+    }
+
+    public void setType_2(String type_2) {
+        this.type_2 = type_2;
+    }
+
+    public String getInside() {
+        return inside;
+    }
+
+    public void setInside(String inside) {
+        this.inside = inside;
+    }
+
+    public String getInsideType() {
+        return insideType;
+    }
+
+    public void setInsideType(String insideType) {
+        this.insideType = insideType;
+    }
+
+    public String getDespite() {
+        return despite;
+    }
+
+    public void setDespite(String despite) {
+        this.despite = despite;
+    }
+
+    public String getDespiteInside() {
+        return despiteInside;
+    }
+
+    public void setDespiteInside(String despiteInside) {
+        this.despiteInside = despiteInside;
+    }
+
+    @Override
+    public String toString() {
+        return "FilterRule{" +
+                "uuid=" + uuid +
+                ", key_1='" + key_1 + '\'' +
+                ", type_1='" + type_1 + '\'' +
+                ", key_2='" + key_2 + '\'' +
+                ", type_2='" + type_2 + '\'' +
+                ", inside='" + inside + '\'' +
+                ", insideType='" + insideType + '\'' +
+                ", despite='" + despite + '\'' +
+                ", despiteInside='" + despiteInside + '\'' +
+                '}';
+    }
+}

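For orientation, a minimal usage sketch of the new class (the map keys mirror the constructor above; the rule values here are made-up examples, not rows from the real relation_neg_rules table):

    import java.util.HashMap;
    import java.util.Map;

    import org.algorithm.core.FilterRule;

    public class FilterRuleDemo {
        public static void main(String[] args) {
            Map<String, String> row = new HashMap<>();
            row.put("key_1", "胸痛");          // entity-1 condition: a concrete word
            row.put("type_1", "word");
            row.put("key_2", "形容词");        // entity-2 condition: a type
            row.put("type_2", "type");
            row.put("inside", ";");            // an in-between punctuation condition
            row.put("inside_type", "punc");
            row.put("despite", "");            // no exceptions
            row.put("despite_inside", "");

            FilterRule rule = new FilterRule(row);
            rule.setUuid(0);
            System.out.println(rule);          // toString() prints every field
        }
    }
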
+ 282 - 0
algorithm/src/main/java/org/algorithm/core/RelationTreeUtils.java

@@ -0,0 +1,282 @@
+package org.algorithm.core;
+
+import org.algorithm.core.cnn.entity.Lemma;
+import org.algorithm.core.cnn.entity.Triad;
+
+import java.util.*;
+
+/**
+ * Relation tree utilities
+ *
+ * @Author: bijl
+ * @Date: 2019/9/5 15:16
+ * @Description:
+ */
+public class RelationTreeUtils {
+
+    /**
+     * Merge entities (also called lemmas here) that share the same text.
+     * Rule:
+     * 1- Always substitute the occurrence at the earliest position.
+     *
+     * @param triads list of entity-pair triads
+     */
+    public static void sameTextLemmaMerge(List<Triad> triads) {
+
+        Map<String, Lemma> lemmaMap = new HashMap<>();
+        for (Triad triad : triads) {
+            Lemma l1 = triad.getL_1();
+            Lemma l2 = triad.getL_2();
+
+            if (lemmaMap.get(l1.getText()) == null)
+                lemmaMap.put(l1.getText(), l1);
+            else {
+                Lemma l1Pre = lemmaMap.get(l1.getText());
+                if (l1Pre.getStartPosition() > l1.getStartPosition())
+                    lemmaMap.put(l1.getText(), l1);  // keep the earlier occurrence in the map
+            }
+
+            if (lemmaMap.get(l2.getText()) == null)
+                lemmaMap.put(l2.getText(), l2);
+            else {
+                Lemma l2Pre = lemmaMap.get(l2.getText());
+                if (l2Pre.getStartPosition() > l2.getStartPosition())
+                    lemmaMap.put(l2.getText(), l2);  // keep the earlier occurrence in the map
+            }
+        }
+        for (Triad triad : triads) {
+            Lemma l1 = triad.getL_1();
+            Lemma l2 = triad.getL_2();
+            triad.setL_1(lemmaMap.get(l1.getText()));  // replace with the earliest same-text lemma
+            triad.setL_2(lemmaMap.get(l2.getText()));  // replace with the earliest same-text lemma
+        }
+    }
+
+    /**
+     * Build the relation tree.
+     * Basic rule:
+     * 1- Of two related entities, the earlier one becomes the parent node, the later one the child.
+     *
+     * @param triads list of related triads
+     */
+    public static void buildRelationTree(List<Triad> triads) {
+        for (Triad triad : triads) {
+            Lemma l1 = triad.getL_1();
+            Lemma l2 = triad.getL_2();
+            if (l1.getStartPosition() < l2.getStartPosition()) {  // the earlier one is the parent
+                l1.setHaveChildren(true);
+                l2.setParent(l1);
+            } else {
+                l2.setHaveChildren(true);
+                l1.setParent(l2);
+            }
+        }
+    }
+
+    /**
+     * Get the branches of the relation tree.
+     *
+     * @param triads related triads whose parent-child links have been set
+     */
+    public static List<List<String>> getRelationTreeBranches(List<Triad> triads) {
+        Map<Lemma, Integer> leafNodeLemmas = new HashMap<>();
+
+        for (Triad triad : triads) {
+            if (!triad.getL_1().isHaveChildren())
+                leafNodeLemmas.putIfAbsent(triad.getL_1(), 1);
+
+            if (!triad.getL_2().isHaveChildren())
+                leafNodeLemmas.putIfAbsent(triad.getL_2(), 1);
+        }
+
+        List<List<String>> branches = new ArrayList<>();
+        for (Lemma lemma : leafNodeLemmas.keySet()) {
+            List<Lemma> aBranch = new ArrayList<>();
+            while (lemma != null) {
+                aBranch.add(lemma);
+                lemma = lemma.getParent();
+            }
+            aBranch.sort(Comparator.naturalOrder());  // sort by position
+            branches.addAll(handleBranch(aBranch));
+        }
+
+
+        return branches;
+    }
+
+    /**
+     * Process a branch: non-negative words are combined, negative words must always be included.
+     * Steps:
+     * 1- Separate negative and non-negative words
+     * 2- Enumerate combinations of the non-negative words
+     * 3- Add the negative words to every combination
+     *
+     * @param aBranch
+     * @return
+     */
+    private static List<List<String>> handleBranch(List<Lemma> aBranch) {
+        List<Lemma> nonNegativeLemmas = new ArrayList<>();
+        List<Lemma> negativeLemmas = new ArrayList<>();
+        for (Lemma lemma : aBranch) {
+            if ("反意或虚拟".equals(lemma.getProperty()))
+                negativeLemmas.add(lemma);
+            else
+                nonNegativeLemmas.add(lemma);
+        }
+        List<List<Lemma>> nonNegativeLemmaCombinations = new ArrayList<>();
+        if (nonNegativeLemmas.size() > 0) {
+            for (int i = 1; i <= nonNegativeLemmas.size(); i++) {
+                combinerSelect(nonNegativeLemmas, new ArrayList<>(), nonNegativeLemmaCombinations,
+                        nonNegativeLemmas.size(), i);
+            }
+        }
+        List<List<String>> result = new ArrayList<>();
+        for (List<Lemma> lemmaCombination : nonNegativeLemmaCombinations) {
+            List<String> lemmaNames = new ArrayList<>();
+            lemmaCombination.addAll(negativeLemmas);  // add the negative words into the combination
+            lemmaCombination.sort(Comparator.naturalOrder());  // sort by position
+            for (Lemma lemma : lemmaCombination)  // collect the names
+                lemmaNames.add(lemma.getText());
+            if (lemmaNames.size() >= 2)
+                result.add(lemmaNames);
+        }
+
+        return result;
+
+    }
+
+    /**
+     * From a triad list to relation-tree branches
+     *
+     * @param triads
+     * @return
+     */
+    public static List<List<String>> triadsToRelationTreeBranches(List<Triad> triads) {
+//        sameTextLemmaMerge(triads);
+        buildRelationTree(triads);
+        return getRelationTreeBranches(triads);
+    }
+
+    /**
+     * Combination generator, C(n,k)
+     *
+     * @param data      source data
+     * @param workSpace scratch space holding the current partial combination
+     * @param result    collector for every complete combination
+     * @param n         the n in C(n,k)
+     * @param k         the k in C(n,k)
+     */
+    private static <E> void combinerSelect(List<E> data, List<E> workSpace, List<List<E>> result, int n, int k) {
+        List<E> copyData;
+        List<E> copyWorkSpace = null;
+
+        if (workSpace.size() == k) {
+//            for (E c : workSpace)
+//                System.out.print(c);
+
+            result.add(new ArrayList<>(workSpace));
+//            System.out.println();
+        }
+
+        for (int i = 0; i < data.size(); i++) {
+            copyData = new ArrayList<E>(data);
+            copyWorkSpace = new ArrayList<E>(workSpace);
+
+            copyWorkSpace.add(copyData.get(i));
+            for (int j = i; j >= 0; j--)
+                copyData.remove(j);
+            combinerSelect(copyData, copyWorkSpace, result, n, k);
+        }
+    }
+
+    /**
+     * Full-permutation algorithm
+     *
+     * @param stringList list of strings
+     * @return
+     */
+    public static ArrayList<ArrayList<String>> permute(List<String> stringList) {
+        ArrayList<ArrayList<String>> result = new ArrayList<ArrayList<String>>();
+        result.add(new ArrayList<String>());
+
+        for (int i = 0; i < stringList.size(); i++) {
+            //list of list in current iteration of the stringList num
+            ArrayList<ArrayList<String>> current = new ArrayList<ArrayList<String>>();
+
+            for (ArrayList<String> l : result) {
+                // # of locations to insert is largest index + 1
+                for (int j = 0; j < l.size() + 1; j++) {
+                    // + add num[i] to different locations
+                    l.add(j, stringList.get(i));
+
+                    ArrayList<String> temp = new ArrayList<String>(l);
+                    current.add(temp);
+
+                    // - remove num[i] add
+                    l.remove(j);
+                }
+            }
+
+            result = new ArrayList<>(current);
+        }
+
+        return result;
+    }
+
+
+    /**
+     * Test entry point
+     */
+    public static void test() {
+
+        List<Triad> triads = new ArrayList<>();
+        String[] arr_1 = {"子宫", "0,1", "部位"};
+        String[] arr_2 = {"内膜", "2,3", "结构"};
+        addTriad(arr_1, arr_2, triads);
+
+        String[] arr_1_1 = {"不", "13,13", "反意或虚拟"};
+        String[] arr_2_1 = {"出血", "10,11", "形容词"};
+        addTriad(arr_1_1, arr_2_1, triads);
+
+        String[] arr_1_2 = {"胸部", "15,16", "部位"};
+        String[] arr_2_2 = {"剧烈", "17,18", "程度"};
+        addTriad(arr_1_2, arr_2_2, triads);
+
+        String[] arr_1_3 = {"疼痛", "17,18", "形容词"};
+        String[] arr_2_3 = {"剧烈", "19,20", "程度"};
+        addTriad(arr_1_3, arr_2_3, triads);
+
+        String[] arr_1_4 = {"内膜", "2,3", "结构"};
+        String[] arr_2_4 = {"出血", "10,11", "形容词"};
+        addTriad(arr_1_4, arr_2_4, triads);
+
+        System.out.println(triads.size());
+        sameTextLemmaMerge(triads);
+        buildRelationTree(triads);
+        List<List<String>> info = getRelationTreeBranches(triads);
+
+        System.out.println(info);
+    }
+
+    /**
+     * Add a triad
+     */
+    private static void addTriad(String[] lemma_1, String[] lemma_2, List<Triad> triads) {
+        Lemma lemma1 = new Lemma();
+        lemma1.setText(lemma_1[0]);
+        lemma1.setPosition(lemma_1[1]);
+        lemma1.setProperty(lemma_1[2]);
+
+        Lemma lemma2 = new Lemma();
+        lemma2.setText(lemma_2[0]);
+        lemma2.setPosition(lemma_2[1]);
+        lemma2.setProperty(lemma_2[2]);
+
+        Triad triad = new Triad();
+        triad.setL_1(lemma1);
+        triad.setL_2(lemma2);
+
+        triads.add(triad);
+
+    }
+
+
+}

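A minimal sketch of the public pipeline on hand-built data (same Lemma/Triad setters the test() method above uses; the sample words are arbitrary):

    import java.util.ArrayList;
    import java.util.List;

    import org.algorithm.core.RelationTreeUtils;
    import org.algorithm.core.cnn.entity.Lemma;
    import org.algorithm.core.cnn.entity.Triad;

    public class RelationTreeDemo {
        private static Lemma lemma(String text, String position, String property) {
            Lemma l = new Lemma();
            l.setText(text);
            l.setPosition(position);  // "start,end"; getStartPosition() parses the first number
            l.setProperty(property);
            return l;
        }

        public static void main(String[] args) {
            // chain 胸部(15) -> 疼痛(17) -> 剧烈(19): a single parent chain with one leaf
            Lemma part = lemma("胸部", "15,16", "部位");
            Lemma pain = lemma("疼痛", "17,18", "形容词");
            Lemma degree = lemma("剧烈", "19,20", "程度");

            List<Triad> triads = new ArrayList<>();
            Triad t1 = new Triad(); t1.setL_1(part); t1.setL_2(pain);
            Triad t2 = new Triad(); t2.setL_1(pain); t2.setL_2(degree);
            triads.add(t1);
            triads.add(t2);

            // prints every combination of >= 2 words from the chain, sorted by position
            System.out.println(RelationTreeUtils.triadsToRelationTreeBranches(triads));
        }
    }
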
+ 493 - 0
algorithm/src/main/java/org/algorithm/core/RuleCheckMachine.java

@@ -0,0 +1,493 @@
+package org.algorithm.core;
+
+import org.algorithm.core.cnn.entity.Lemma;
+import org.algorithm.core.cnn.entity.Triad;
+import org.algorithm.util.MysqlConnector;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.*;
+
+/**
+ * Rule-check machine
+ *
+ * @Author: bijl
+ * @Date: 2019/9/6 10:32
+ * @Description:
+ */
+public class RuleCheckMachine {
+    private final List<FilterRule> filterRules = new ArrayList<>();
+    private Map<String, Map<String, Set<Integer>>> key_1_map = null;
+    private Map<String, Map<String, Set<Integer>>> key_2_map = null;
+    private Map<String, String> punctuations = new HashMap<>();
+    private Map<String, Set<Integer>> despiteMap = null;  // entity name -> [rule uuids]
+    private Map<String, Set<Integer>> despiteInsideMap = null; // entity name -> [rule uuids]
+    private Map<String, Map<String, Set<Integer>>> insideMap = null;
+
+
+    public RuleCheckMachine() {
+        this.loadRules();
+        this.makeKey1Map();
+        this.makeKey2Map();
+        this.makeInsideMap();
+        this.makeDespiteMap();
+        this.makeDespiteInsideMap();
+    }
+
+
+    /**
+     * Load the rules
+     */
+    public void loadRules() {
+        // connect to the database
+        String url = "jdbc:mysql://192.168.2.235/test_case?user=root&password=diagbot@20180822";
+        MysqlConnector connector = new MysqlConnector(url);
+        String querySql =
+                "SELECT rr.key_1, rr.type_1, rr.key_2, rr.type_2, rr.inside, rr.inside_type, " +
+                        "rr.despite, rr.despite_inside " +
+                        "FROM relation_neg_rules AS rr " +
+                        "WHERE rr.`status` = 1";
+
+        ResultSet rs = connector.query(querySql);
+        Integer uuid = 0;
+        try {
+            while (rs.next()) {
+                String key_1 = rs.getString("key_1");
+                String type_1 = rs.getString("type_1");
+
+                String key_2 = rs.getString("key_2");
+                String type_2 = rs.getString("type_2");
+
+                String inside = rs.getString("inside");
+                String inside_type = rs.getString("inside_type");
+
+                String despite = rs.getString("despite");
+                String despite_inside = rs.getString("despite_inside");
+
+                String[] despiteSplit = despite.split(",");
+                String[] despiteInsideSplit = despite_inside.split(",");
+                for (int j = 0; j < despiteSplit.length; j++) {
+                    for (int k = 0; k < despiteInsideSplit.length; k++) {
+                        Map<String, String> variableMap = new HashMap<>();
+                        variableMap.put("key_1", key_1);
+                        variableMap.put("type_1", type_1);
+
+                        variableMap.put("key_2", key_2);
+                        variableMap.put("type_2", type_2);
+
+                        variableMap.put("inside", inside);
+                        variableMap.put("inside_type", inside_type);
+
+                        variableMap.put("despite", despiteSplit[j]);
+                        variableMap.put("despite_inside", despiteInsideSplit[k]);
+
+                        FilterRule filterRule = new FilterRule(variableMap);
+                        filterRule.setUuid(uuid);
+                        this.filterRules.add(filterRule);
+
+//                            System.out.println(filterRule);
+
+                        uuid += 1;
+                    }
+                }
+
+            }
+
+        } catch (SQLException e) {
+            e.printStackTrace();
+            throw new RuntimeException("Failed to load the rule dictionary");
+        } finally {
+            connector.close();
+        }
+    }
+
+    /**
+     * Build the lookup maps for entity 1
+     */
+    private void makeKey1Map() {
+        Map<String, Map<String, Set<Integer>>> key_1_map_ = new HashMap<>();
+        Map<String, Set<Integer>> emptyMap = new HashMap<>();
+        Map<String, Set<Integer>> typeMap = new HashMap<>();
+        Map<String, Set<Integer>> wordMap = new HashMap<>();
+        key_1_map_.put("", emptyMap);
+        key_1_map_.put("type", typeMap);
+        key_1_map_.put("word", wordMap);
+
+        for (FilterRule rule : this.filterRules) {
+            String key_1 = rule.getKey_1();
+            String type_1 = rule.getType_1();
+            Integer uuid = rule.getUuid();
+
+            this.inputMaps(key_1, type_1, uuid, emptyMap, typeMap, wordMap, null);
+        }
+        this.key_1_map = key_1_map_;
+    }
+
+
+    /**
+     * Build the lookup maps for entity 2
+     */
+    private void makeKey2Map() {
+        Map<String, Map<String, Set<Integer>>> key_2_map_ = new HashMap<>();
+        Map<String, Set<Integer>> emptyMap = new HashMap<>();
+        Map<String, Set<Integer>> typeMap = new HashMap<>();
+        Map<String, Set<Integer>> wordMap = new HashMap<>();
+        key_2_map_.put("", emptyMap);
+        key_2_map_.put("type", typeMap);
+        key_2_map_.put("word", wordMap);
+
+        for (FilterRule rule : this.filterRules) {
+            String key_2 = rule.getKey_2();
+            String type_2 = rule.getType_2();
+            Integer uuid = rule.getUuid();
+
+            this.inputMaps(key_2, type_2, uuid, emptyMap, typeMap, wordMap, null);
+        }
+        this.key_2_map = key_2_map_;
+    }
+
+    /**
+     * Build the lookup maps for in-between (inside) entities
+     */
+    private void makeInsideMap() {
+        Map<String, Map<String, Set<Integer>>> insideMap_ = new HashMap<>();
+        Map<String, Set<Integer>> punctuationMap = new HashMap<>();
+        Map<String, Set<Integer>> typeMap = new HashMap<>();
+        Map<String, Set<Integer>> typePunctuationMap = new HashMap<>();
+        Map<String, Set<Integer>> wordMap = new HashMap<>();
+        insideMap_.put("punc", punctuationMap);
+        insideMap_.put("type", typeMap);
+        insideMap_.put("typePunctuation", typePunctuationMap);
+        insideMap_.put("word", wordMap);
+
+        for (FilterRule rule : this.filterRules) {
+            String inside = rule.getInside();
+            String insideType = rule.getInsideType();
+            Integer uuid = rule.getUuid();
+            if (insideType.equals("punc"))
+                this.punctuations.put(inside, inside);
+
+            if (",".equals(inside.substring(0, 1)))
+                this.inputMaps(inside, insideType, uuid, null, typePunctuationMap, wordMap, punctuationMap);
+            else
+                this.inputMaps(inside, insideType, uuid, null, typeMap, wordMap, punctuationMap);
+        }
+        this.insideMap = insideMap_;
+    }
+
+    /**
+     * Populate the lookup maps
+     *
+     * @param key
+     * @param type
+     * @param uuid
+     * @param emptyMap
+     * @param typeMap
+     * @param wordMap
+     * @param punctuationMap
+     */
+    private void inputMaps(String key, String type, Integer uuid, Map<String, Set<Integer>> emptyMap,
+                           Map<String, Set<Integer>> typeMap, Map<String, Set<Integer>> wordMap,
+                           Map<String, Set<Integer>> punctuationMap) {
+
+        if ("".equals(type)) {
+            if (emptyMap.get(key) == null)
+                emptyMap.put(key, new HashSet<>());
+            emptyMap.get(key).add(uuid);
+        } else if ("type".equals(type)) {
+            if (typeMap.get(key) == null)
+                typeMap.put(key, new HashSet<>());
+            typeMap.get(key).add(uuid);
+        } else if ("word".equals(type)) {
+            if (wordMap.get(key) == null)
+                wordMap.put(key, new HashSet<>());
+            wordMap.get(key).add(uuid);
+        } else if ("punc".equals(type)) {
+            if (punctuationMap.get(key) == null)
+                punctuationMap.put(key, new HashSet<>());
+            punctuationMap.get(key).add(uuid);
+        } else {
+            throw new RuntimeException("Unknown new type encountered");
+        }
+
+    }
+
+
+    /**
+     * Build the exception (despite) map
+     */
+    private void makeDespiteMap() {
+        Map<String, Set<Integer>> despiteMap = new HashMap<>();
+        for (FilterRule rule : this.filterRules) {
+            String despite = rule.getDespite();
+            if (!despite.equals("")) {  // skip blanks
+                if (despiteMap.get(despite) == null) {
+                    despiteMap.put(despite, new HashSet<>());
+                }
+                despiteMap.get(despite).add(rule.getUuid());
+            }
+        }
+        this.despiteMap = despiteMap;
+    }
+
+
+    /**
+     * Build the exception-inside (despite_inside) map
+     */
+    private void makeDespiteInsideMap() {
+        Map<String, Set<Integer>> despiteInsideMap = new HashMap<>();
+        for (FilterRule rule : this.filterRules) {
+            String despiteInside = rule.getDespiteInside();
+            if (!despiteInside.equals("")) {  // skip blanks
+                if (despiteInsideMap.get(despiteInside) == null) {
+                    despiteInsideMap.put(despiteInside, new HashSet<>());
+                }
+                despiteInsideMap.get(despiteInside).add(rule.getUuid());
+            }
+        }
+        this.despiteInsideMap = despiteInsideMap;
+    }
+
+    /**
+     * Name / type / start-position holder class
+     */
+    class NameTypeStartPosition implements Comparable<NameTypeStartPosition> {
+        private String name;
+        private String type;
+        private int startPosition;
+
+        public NameTypeStartPosition(String name, String type, int startPosition) {
+            this.name = name;
+            this.type = type;
+            this.startPosition = startPosition;
+        }
+
+        @Override
+        public int compareTo(NameTypeStartPosition o) {
+            return this.startPosition - o.getStartPosition();
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        public void setName(String name) {
+            this.name = name;
+        }
+
+        public String getType() {
+            return type;
+        }
+
+        public void setType(String type) {
+            this.type = type;
+        }
+
+        public int getStartPosition() {
+            return startPosition;
+        }
+
+        public void setStartPosition(int startPosition) {
+            this.startPosition = startPosition;
+        }
+
+        @Override
+        public String toString() {
+            return "NameTypeStartPosition{" +
+                    "name='" + name + '\'' +
+                    ", type='" + type + '\'' +
+                    ", startPosition=" + startPosition +
+                    '}';
+        }
+
+    }
+
+    /**
+     * Get (name, type, startPosition) objects sorted by start position
+     *
+     * @param triads
+     * @return
+     */
+    public List<NameTypeStartPosition> getSortedNameTypeByPosition(List<Triad> triads) {
+        List<NameTypeStartPosition> nameTypeStartPositions = new ArrayList<>();
+        for (Triad triad : triads) {
+            Lemma l1 = triad.getL_1();
+            Lemma l2 = triad.getL_2();
+            nameTypeStartPositions.add(
+                    new NameTypeStartPosition(l1.getText(), l1.getProperty(), l1.getStartPosition()));
+            nameTypeStartPositions.add(
+                    new NameTypeStartPosition(l2.getText(), l2.getProperty(), l2.getStartPosition()));
+        }
+        nameTypeStartPositions.sort(Comparator.naturalOrder());
+
+        return nameTypeStartPositions;
+    }
+
+    /**
+     * Whether this entity pair should be removed
+     *
+     * @param nameTypeStartPositions
+     * @param startIndex
+     * @param endIndex
+     * @return
+     */
+    public boolean isRemove(List<NameTypeStartPosition> nameTypeStartPositions, int startIndex, int endIndex,
+                            String sentence) {
+        Set<Integer> remainUuids = new HashSet<>();  // uuids of the rules still in play
+        for (FilterRule rule : this.filterRules)
+            remainUuids.add(rule.getUuid());
+
+        // exception (despite) check against the two entity names
+        String entity_1_name = nameTypeStartPositions.get(startIndex).getName();
+        String entity_1_type = nameTypeStartPositions.get(startIndex).getType();
+
+        String entity_2_name = nameTypeStartPositions.get(endIndex).getName();
+        String entity_2_type = nameTypeStartPositions.get(endIndex).getType();
+
+        Set<Integer> set = null;
+        set = this.despiteMap.get(entity_1_name);  // drop rules whose despite condition names entity 1 (the rule no longer applies)
+        this.removeAll(remainUuids, set);
+
+        set = this.despiteMap.get(entity_2_name);  // drop rules whose despite condition names entity 2 (the rule no longer applies)
+        this.removeAll(remainUuids, set);
+
+        // drop rules whose despite_inside exception is triggered by an in-between entity name
+        for (int i = startIndex; i <= endIndex; i++) {
+            NameTypeStartPosition nameTypeStartPosition = nameTypeStartPositions.get(i);
+            set = this.despiteInsideMap.get(nameTypeStartPosition.getName());
+            this.removeAll(remainUuids, set);
+        }
+
+        // three-stage filtering
+        // stage 1: entity-1 conditions
+        set = new HashSet<>();
+        this.addAll(set, this.key_1_map.get("").get(""));
+        // rules whose entity-1 condition matches by type, e.g. ("形容词", "type")
+        this.addAll(set, this.key_1_map.get("type").get(entity_1_type));
+        // rules whose entity-1 condition matches by word, e.g. ("胸痛", "word")
+        this.addAll(set, this.key_1_map.get("word").get(entity_1_name));
+        this.retainAll(remainUuids, set);  // intersect: keep rules that match entity 1 and whose exceptions did not fire
+        if (remainUuids.size() == 0)
+            return false;
+
+        // stage 2: entity-2 conditions
+        set = new HashSet<>();
+        this.addAll(set, this.key_2_map.get("").get(""));
+        // rules whose entity-2 condition matches by type, e.g. ("形容词", "type")
+        this.addAll(set, this.key_2_map.get("type").get(entity_2_type));
+        // rules whose entity-2 condition matches by word, e.g. ("胸痛", "word")
+        this.addAll(set, this.key_2_map.get("word").get(entity_2_name));
+        this.retainAll(remainUuids, set);  // intersect: keep rules that match entity 2 and whose exceptions did not fire
+        if (remainUuids.size() == 0)
+            return false;
+
+        // stage 3: in-between (inside) conditions
+        set = new HashSet<>();
+        for (int i = startIndex; i <= endIndex; i++) {
+            NameTypeStartPosition nameTypeStartPosition = nameTypeStartPositions.get(i);
+            // in-between entity matches an inside condition by word, e.g. ("胸痛", "word")
+            this.addAll(set, this.insideMap.get("word").get(nameTypeStartPosition.getName()));
+            // in-between entity matches an inside condition by type (without a comma prefix)
+            this.addAll(set, this.insideMap.get("type").get(nameTypeStartPosition.getType()));
+        }
+
+        int entity_1_start = nameTypeStartPositions.get(startIndex).getStartPosition();
+        int entity_2_start = nameTypeStartPositions.get(endIndex).getStartPosition();
+
+        // punctuation filter
+        String aPunc = null;
+        for (int i = entity_1_start; i < entity_2_start; i++) {
+            aPunc = sentence.substring(i, i + 1);
+            if (this.punctuations.get(aPunc) != null)
+                this.addAll(set, this.insideMap.get("punc").get(aPunc));
+        }
+
+        // Chinese/English comma + type filter
+        String[] commas = {",", ","};
+        int commaIndex = 0;
+        String commaPadType = null;  // comma concatenated with a type
+        for (String comma : commas) {
+            commaIndex = sentence.indexOf(comma, entity_1_start + 1);  // position of the first comma
+            while (commaIndex > -1 && commaIndex < entity_2_start) {
+                for (int i = startIndex; i <= endIndex; i++) {  // match this comma against every entity after it
+                    NameTypeStartPosition nameTypeStartPosition = nameTypeStartPositions.get(i);
+                    if (nameTypeStartPosition.getStartPosition() > commaIndex) {
+                        commaPadType = "," + nameTypeStartPosition.getType();
+                        this.addAll(set, this.insideMap.get("typePunctuation").get(commaPadType));
+                    }
+                }
+                commaIndex = sentence.indexOf(comma, commaIndex + 1);  // advance to the next comma
+            }
+        }
+
+        this.retainAll(remainUuids, set);  // intersect: keep rules whose inside conditions matched and whose exceptions did not fire
+
+//        for (FilterRule rule: this.filterRules) {
+//            if (remainUuids.contains(rule.getUuid()))
+//                System.out.println(rule);
+//
+//        }
+
+        return remainUuids.size() > 0;  // some rule still applies, so filter this pair out
+
+    }
+
+    /**
+     * Set difference, guarding against null and empty sets
+     *
+     * @param basicSet
+     * @param set
+     */
+    private void removeAll(Set<Integer> basicSet, Set<Integer> set) {
+        if (set != null && set.size() > 0)
+            basicSet.removeAll(set);
+    }
+
+    /**
+     * Set union, guarding against null and empty sets
+     *
+     * @param basicSet
+     * @param set
+     */
+    private void addAll(Set<Integer> basicSet, Set<Integer> set) {
+        if (set != null && set.size() > 0)
+            basicSet.addAll(set);
+    }
+
+    /**
+     * Set intersection, guarding against null and empty sets
+     *
+     * @param basicSet
+     * @param set
+     */
+    private void retainAll(Set<Integer> basicSet, Set<Integer> set) {
+        if (set != null && set.size() > 0)
+            basicSet.retainAll(set);
+    }
+
+    /**
+     * Check the triads and remove those matching a filter rule
+     *
+     * @param sentence the sentence
+     * @param triads the triad list
+     */
+    public void checkAndRemove(String sentence, List<Triad> triads) {
+        List<NameTypeStartPosition> nameTypeStartPositions = this.getSortedNameTypeByPosition(triads);
+        Map<Integer, Integer> startPositionToIndexMap = new HashMap<>();
+        for (int i = 0; i < nameTypeStartPositions.size(); i++)
+            startPositionToIndexMap.put(nameTypeStartPositions.get(i).getStartPosition(), i);
+
+        Iterator<Triad> it = triads.iterator();
+        while (it.hasNext()) {  // iterate over the triads, removing those that match a filter rule
+            Triad triad = it.next();
+            int startIndex = startPositionToIndexMap.get(triad.getL_1().getStartPosition());
+            int endIndex = startPositionToIndexMap.get(triad.getL_2().getStartPosition());
+            if (isRemove(nameTypeStartPositions, startIndex, endIndex, sentence)) {
+                it.remove();
+            }
+        }
+    }
+}

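A call-site sketch (it assumes the relation_neg_rules table above is reachable, since the constructor loads its rules from MySQL; the sentence and triads are placeholders):

    import java.util.List;

    import org.algorithm.core.RuleCheckMachine;
    import org.algorithm.core.cnn.entity.Triad;

    public class RuleCheckDemo {
        public static void filter(String sentence, List<Triad> candidates) {
            RuleCheckMachine machine = new RuleCheckMachine();  // loads and indexes the rules once
            machine.checkAndRemove(sentence, candidates);       // drops every pair matched by a rule
            // candidates now only holds pairs that no filter rule matched
        }
    }
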
+ 3 - 4
algorithm/src/main/java/org/algorithm/core/cnn/AlgorithmCNNExecutor.java

@@ -1,13 +1,12 @@
 package org.algorithm.core.cnn;
 
-import org.algorithm.core.cnn.entity.Lemma;
 import org.algorithm.core.cnn.entity.Triad;
 
 import java.util.List;
 
 /**
  * @ClassName org.algorithm.core.cnn.model.AlgorithmCNNExecutor
- * @Description TODO
+ * @Description
  * @Author fyeman
  * @Date 2019/1/17/017 19:18
  * @Version 1.0
@@ -16,8 +15,8 @@ public abstract class AlgorithmCNNExecutor {
     /**
      *
      * @param content input sentence
-     * @param triads entity list
-     * @return
+     * @param triads entity list (triad list)
+     * @return  [[groups of related words]]
      */
     public abstract List<Triad> execute(String content, List<Triad> triads);
 }

+ 22 - 0
algorithm/src/main/java/org/algorithm/core/cnn/AlgorithmCNNExecutorPacs.java

@@ -0,0 +1,22 @@
+package org.algorithm.core.cnn;
+
+import org.algorithm.core.cnn.entity.Triad;
+
+import java.util.List;
+
+/**
+ * @ClassName org.algorithm.core.cnn.AlgorithmCNNExecutorPacs
+ * @Description
+ * @Author fyeman
+ * @Date 2019/1/17/017 19:18
+ * @Version 1.0
+ **/
+public abstract class AlgorithmCNNExecutorPacs {
+    /**
+     *
+     * @param content input sentence
+     * @param triads entity list (triad list)
+     * @return  [[groups of related words]]
+     */
+    public abstract List<List<String>>  execute(String content, List<Triad> triads);
+}

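The new base class differs from AlgorithmCNNExecutor only in its return type (word groups instead of triads); a stub subclass, with a hypothetical name, looks like this:

    import java.util.ArrayList;
    import java.util.List;

    import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
    import org.algorithm.core.cnn.entity.Triad;

    public class NoopCNNExecutorPacs extends AlgorithmCNNExecutorPacs {
        @Override
        public List<List<String>> execute(String content, List<Triad> triads) {
            // a real implementation scores the triads and returns groups of related words
            return new ArrayList<>();
        }
    }
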
+ 1 - 1
algorithm/src/main/java/org/algorithm/core/cnn/dataset/RelationExtractionDataSet.java

@@ -17,7 +17,7 @@ import com.alibaba.fastjson.JSONObject;
 public class RelationExtractionDataSet {
 
     private Map<String, Integer> char2id = new HashMap<>();
-    public final int MAX_LEN = 512;
+    public final int MAX_LEN = 256;
 
 
     public RelationExtractionDataSet(String dir) {

+ 32 - 1
algorithm/src/main/java/org/algorithm/core/cnn/entity/Lemma.java

@@ -10,12 +10,38 @@ import java.util.List;
  * @Date 2019/1/17/017 19:15
  * @Version 1.0
  **/
-public class Lemma {
+public class Lemma implements Comparable<Lemma> {
     private String text;
     private String position;
     private int len;
     private String property;
 
+    private Lemma parent;
+
+    private boolean haveChildren = false;
+
+    public boolean isHaveChildren() {
+        return haveChildren;
+    }
+
+    public void setHaveChildren(boolean haveChildren) {
+        this.haveChildren = haveChildren;
+    }
+
+    public Lemma getParent() {
+        return parent;
+    }
+
+    public void setParent(Lemma parent) {
+        this.parent = parent;
+    }
+
+    public int getStartPosition() {
+        String[] pos = this.position.split(",");
+        return Integer.parseInt(pos[0]);
+    }
+
+
     private List<Lemma> relationLemmas = new ArrayList<>();
 
     public String getText() {
@@ -64,4 +90,9 @@ public class Lemma {
         }
         relationLemmas.add(l);
     }
+
+    @Override
+    public int compareTo(Lemma o) {
+        return this.getStartPosition() - o.getStartPosition();
+    }
 }

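Since Lemma now implements Comparable by start position, lemma lists can be sorted directly; a small sketch:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    import org.algorithm.core.cnn.entity.Lemma;

    public class LemmaSortDemo {
        public static void main(String[] args) {
            Lemma a = new Lemma(); a.setPosition("17,18");
            Lemma b = new Lemma(); b.setPosition("2,3");

            List<Lemma> lemmas = new ArrayList<>();
            lemmas.add(a);
            lemmas.add(b);

            lemmas.sort(Comparator.naturalOrder());  // b (start 2) now precedes a (start 17)
            System.out.println(lemmas.get(0).getStartPosition());  // 2
        }
    }
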
+ 40 - 13
algorithm/src/main/java/org/algorithm/core/cnn/model/RelationExtractionEnsembleModel.java

@@ -1,6 +1,8 @@
 package org.algorithm.core.cnn.model;
 
-import org.algorithm.core.cnn.AlgorithmCNNExecutor;
+import org.algorithm.core.RelationTreeUtils;
+import org.algorithm.core.RuleCheckMachine;
+import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
 import org.algorithm.core.cnn.dataset.RelationExtractionDataSet;
 import org.algorithm.core.cnn.entity.Triad;
 import org.diagbot.pub.utils.PropertiesUtil;
@@ -21,7 +23,7 @@ import java.util.concurrent.*;
  * @Date: 2019/1/22 10:21
  * @Description: ensemble model
  */
-public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutor {
+public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutorPacs {
     private final String X_PLACEHOLDER = "X";
     private final String PREDICTION = "prediction/prediction";
     private final int NUM_LABEL = 1;
@@ -30,8 +32,10 @@ public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutor {
     private RelationExtractionDataSet dataSet;
     private RelationExtractionSubModel[] subModels = new RelationExtractionSubModel[2];
     private ExecutorService executorService = Executors.newCachedThreadPool();
+    private final RuleCheckMachine ruleCheckMachine = new RuleCheckMachine();
 
     public RelationExtractionEnsembleModel() {
+        // resolve paths
         PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
 
         String modelsPath = prop.getProperty("basicPath");  // base path of the models
@@ -39,18 +43,20 @@ public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutor {
         dataSetPath = dataSetPath + File.separator + "char2id.json";
         String exportDir = modelsPath.replace("model_version_replacement", "ensemble_model_2");
 
+        // load the dataset and initialize the ensemble model
         this.dataSet = new RelationExtractionDataSet(dataSetPath);
         this.init(exportDir);
 
+        // set the sub-model coefficients and load the cnn_1d_low sub-model
         Map<String, Tensor<Float>> cnn_1d_low_map = new HashMap<>();
-        cnn_1d_low_map.put("keep_prob",Tensor.create(1.0f, Float.class));
+        cnn_1d_low_map.put("keep_prob", Tensor.create(1.0f, Float.class));
         subModels[0] = new RelationExtractionSubModel("cnn_1d_low", cnn_1d_low_map);
-//        subModels[1] = new RelationExtractionSubModel("cnn_1d_lstm_low");
 
+        // set the sub-model coefficients and load the lstm_low_api sub-model
         Map<String, Tensor<Float>> lstm_low_api_map = new HashMap<>();
-        lstm_low_api_map.put("input_keep_prob",Tensor.create(1.0f, Float.class));
-        lstm_low_api_map.put("output_keep_prob",Tensor.create(1.0f, Float.class));
-        lstm_low_api_map.put("state_keep_prob",Tensor.create(1.0f, Float.class));
+        lstm_low_api_map.put("input_keep_prob", Tensor.create(1.0f, Float.class));
+        lstm_low_api_map.put("output_keep_prob", Tensor.create(1.0f, Float.class));
+        lstm_low_api_map.put("state_keep_prob", Tensor.create(1.0f, Float.class));
         subModels[1] = new RelationExtractionSubModel("lstm_low_api", lstm_low_api_map);
     }
 
@@ -92,12 +98,24 @@ public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutor {
         return inputValues;
     }
 
+
+    /**
+     * Data pre-processing, including rule-based filtering
+     * @param content
+     * @param triads
+     */
+    private void preProcess(String content, List<Triad> triads) {
+        if (!(content.length() > this.dataSet.MAX_LEN) && triads.size() > 0) // sentence within MAX_LEN and triads present
+            this.ruleCheckMachine.checkAndRemove(content, triads);
+    }
+
     @Override
-    public List<Triad> execute(String content, List<Triad> triads) {
-        // sentence must be within MAX_LEN and have triads
-        if (content.length() > this.dataSet.MAX_LEN || triads.size() < 1) {
-            return new ArrayList<>();
-        }
+    public List<List<String>> execute(String content, List<Triad> triads) {
+        // pre-process (rule-based filtering)
+        this.preProcess(content, triads);
+        if (content.length() > this.dataSet.MAX_LEN || triads.size() < 1)  // sentence within MAX_LEN and triads present
+            return null;
+
         int[][] inputValues = this.convertData(content, triads);  // shape = [3, batchSize * this.subModels.length]
         int batchSize = triads.size();
 
@@ -159,7 +177,16 @@ public class RelationExtractionEnsembleModel extends AlgorithmCNNExecutor {
         for (Triad triad : deleteTriads)
             triads.remove(triad);
 
-        return triads;
+        return this.triadsToRelationTreeBranches(triads);
+    }
+
+    /**
+     * From a triad list to relation-tree branches
+     * @param triads
+     * @return
+     */
+    public List<List<String>> triadsToRelationTreeBranches(List<Triad> triads) {
+        return RelationTreeUtils.triadsToRelationTreeBranches(triads);
     }
 
 

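End to end, execute() now pre-filters the triads, runs both sub-models, and returns relation-tree branches rather than triads; a call-site sketch (model paths and char2id.json are resolved from algorithm.properties, as in the constructor above):

    import java.util.List;

    import org.algorithm.core.cnn.entity.Triad;
    import org.algorithm.core.cnn.model.RelationExtractionEnsembleModel;

    public class EnsembleDemo {
        public static void print(String sentence, List<Triad> triads) {
            RelationExtractionEnsembleModel model = new RelationExtractionEnsembleModel();
            List<List<String>> branches = model.execute(sentence, triads);
            if (branches != null)   // null when the sentence exceeds MAX_LEN or no triads remain
                for (List<String> branch : branches)
                    System.out.println(String.join("/", branch));  // one group of related words per branch
        }
    }
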
+ 4 - 4
algorithm/src/main/java/org/algorithm/core/cnn/model/RelationExtractionModel.java

@@ -4,7 +4,7 @@ import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSONArray;
 import com.alibaba.fastjson.JSONObject;
 import com.alibaba.fastjson.TypeReference;
-import org.algorithm.core.cnn.AlgorithmCNNExecutor;
+import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
 import org.algorithm.core.cnn.dataset.RelationExtractionDataSet;
 import org.algorithm.core.cnn.entity.LemmaInfo;
 import org.algorithm.core.cnn.entity.Triad;
@@ -21,7 +21,7 @@ import java.util.List;
  * @Date: 2019/1/22 10:21
  * @Description:
  */
-public class RelationExtractionModel extends AlgorithmCNNExecutor {
+public class RelationExtractionModel extends AlgorithmCNNExecutorPacs {
 //    self.X = tf.placeholder(tf.int32, shape=[None, self.max_length], name='X')
 //    self.pos1 = tf.placeholder(tf.int32, shape=[None, self.max_length], name='pos1')
 //    self.pos2 = tf.placeholder(tf.int32, shape=[None, self.max_length], name='pos2')
@@ -54,7 +54,7 @@ public class RelationExtractionModel extends AlgorithmCNNExecutor {
     }
 
     @Override
-    public List<Triad> execute(String content, List<Triad> triads) {
+    public List<List<String>> execute(String content, List<Triad> triads) {
 //        List<Lemma[]> combinations = new ArrayList<>();
//        // combinations
//        for(int i=0; i < lemmas.size() - 1; i++){  // pairwise combine into entity pairs
@@ -83,7 +83,7 @@ public class RelationExtractionModel extends AlgorithmCNNExecutor {
 //            }
 //
 //        }
-        return triads;
+        return null;
     }
 
     /**

+ 2 - 1
algorithm/src/main/java/org/algorithm/core/neural/DiagnosisPredictExecutor.java

@@ -15,7 +15,8 @@ public class DiagnosisPredictExecutor extends AlgorithmNeuralExecutor {
     public DiagnosisPredictExecutor() {
         String modelVersion = "diagnosisPredict.version";
 
-        this.model = TensorFlowModelLoadFactory.create(modelVersion);
+//        this.model = TensorFlowModelLoadFactory.create(modelVersion);
+        this.model = TensorFlowModelLoadFactory.createAndFilterDiagnosis(modelVersion);  // adds disease filtering
     }
 
 }

+ 30 - 7
algorithm/src/main/java/org/algorithm/core/neural/TensorFlowModelLoadFactory.java

@@ -11,13 +11,10 @@ import org.diagbot.pub.utils.PropertiesUtil;
  * @Description:
  */
 public class TensorFlowModelLoadFactory {
-    
+
     /**
      * Load and create the model
-     * @param exportDir  path where the model is saved
-     * @param inputOpName  name of the input op
-     * @param outputOpName  name of the output op
-     * @param dataSet     dataset used by the model
+     * @param modelVersion  model version key
      * @return the model
      */
     public static TensorflowModel create(String modelVersion) {
@@ -28,10 +25,9 @@ public class TensorFlowModelLoadFactory {
         String inputOpName = "X";  // unified input op name
         String outputOpName = "softmax/softmax";  // unified output op name
         
-        // TODO: the spot that was modified
 //        NNDataSet dataSet = new NNDataSetImplNonParallel(modelVersion);  // new model
         NNDataSet dataSet = new NNDataSetImpl(modelVersion);  // old model
-        
+
         String modelPath = prop.getProperty("basicPath");  // base model path
         modelVersion = prop.getProperty(modelVersion);
         modelPath = modelPath.replace("model_version_replacement", modelVersion);  // build the model path
@@ -41,4 +37,31 @@ public class TensorFlowModelLoadFactory {
         return tm;
     }
 
+    /**
+     * Load and create the model, with diagnosis filtering enabled
+     * @param modelVersion  model version key
+     * @return the model
+     */
+    public static TensorflowModel createAndFilterDiagnosis(String modelVersion) {
+
+
+        PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
+
+        String inputOpName = "X";  // unified input op name
+        String outputOpName = "softmax/softmax";  // unified output op name
+
+        NNDataSet dataSet = new NNDataSetImpl(modelVersion);  // old model
+
+        dataSet.setDoFilterDiagnosis(true);
+        dataSet.readFilterDiagnosisDict();
+
+        String modelPath = prop.getProperty("basicPath");  // base model path
+        modelVersion = prop.getProperty(modelVersion);
+        modelPath = modelPath.replace("model_version_replacement", modelVersion);  // build the model path
+
+        TensorflowModel tm = new TensorflowModel(modelPath, inputOpName, outputOpName,
+                dataSet);
+        return tm;
+    }
+
 }

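The two factory methods differ only in that the new one flips the dataset's filter switch before the model is built; a sketch of the two call paths (the property key is the one DiagnosisPredictExecutor uses; the TensorflowModel import path is assumed from the factory's package):

    import org.algorithm.core.neural.TensorFlowModelLoadFactory;
    import org.algorithm.core.neural.TensorflowModel;

    public class FactoryDemo {
        public static void main(String[] args) {
            // plain model: wrap() returns every diagnosis with its probability
            TensorflowModel plain = TensorFlowModelLoadFactory.create("diagnosisPredict.version");

            // filtering model: wrap() only returns diagnoses related to the input features
            TensorflowModel filtered =
                    TensorFlowModelLoadFactory.createAndFilterDiagnosis("diagnosisPredict.version");
        }
    }
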
+ 122 - 13
algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSet.java

@@ -1,10 +1,10 @@
 package org.algorithm.core.neural.dataset;
 
-import java.util.HashMap;
-import java.util.Map;
+import java.util.*;
 
 /**
  * Data-processing module for neural networks
+ *
  * @Author: bijl
  * @Date: 2018年7月20日-下午4:01:34
  * @Description:
@@ -13,17 +13,20 @@ public abstract class NNDataSet {
     protected final int NUM_FEATURE;
     private final int NUM_LABEL;
     protected final Map<String, Integer> FEATURE_DICT = new HashMap<>();
-    
+
     // three keyword dictionaries added in the new version
     protected final Map<String, Integer> PARTBODY_DICT = new HashMap<>();
     protected final Map<String, Integer> PROPERTY_DICT = new HashMap<>();
     protected final Map<String, Integer> DURATION_DICT = new HashMap<>();
-    
+
     protected final Map<String, Integer> LABEL_DICT = new HashMap<>();
     protected final Map<String, Integer> NEGATIVE_DICT = new HashMap<>();
+    protected final Map<String, String> RE_SPLIT_WORD_DICT = new HashMap<>();
+    protected final Map<String, Map<String, Integer>> RELATED_DIAGNOSIS_DICT = new HashMap<>();
+    protected final List<String> FEATURE_NAME_STORE = new ArrayList<>();
     private final String[] FEATURE_DICT_ARRAY;
     private final String[] LABEL_DICT_ARRAY;
-
+    private boolean doFilterDiagnosis = false;
 
     public NNDataSet(String modelAndVersion) {
         this.readDict(modelAndVersion);
@@ -32,10 +35,12 @@ public abstract class NNDataSet {
         this.FEATURE_DICT_ARRAY = new String[this.NUM_FEATURE];
         this.LABEL_DICT_ARRAY = new String[this.NUM_LABEL];
         this.makeDictArr();
+        this.readReSplitWordDict();
     }
-    
+
     /**
      * Convert external inputs into a feature vector
+     *
      * @param inputs
      * @return
      */
@@ -45,28 +50,118 @@ public abstract class NNDataSet {
      * Read the feature and label dictionaries
      */
     public abstract void readDict(String modelAndVersion);
-    
+
+    /**
+     * Read the re-split word dictionary
+     */
+    public abstract void readReSplitWordDict();
+
+    /**
+     * Read the diagnosis filter dictionary
+     */
+    public abstract void readFilterDiagnosisDict();
+
     /**
      * Build the dictionary arrays
      */
     private void makeDictArr() {
-        for (Map.Entry<String, Integer> entry : this.FEATURE_DICT.entrySet()) 
+        for (Map.Entry<String, Integer> entry : this.FEATURE_DICT.entrySet())
             this.FEATURE_DICT_ARRAY[entry.getValue()] = entry.getKey();
-        
-        for (Map.Entry<String, Integer> entry : this.LABEL_DICT.entrySet()) 
+
+        for (Map.Entry<String, Integer> entry : this.LABEL_DICT.entrySet())
             this.LABEL_DICT_ARRAY[entry.getValue()] = entry.getKey();
-        
+
+    }
+
+    /**
+     * Wrap feature names with probabilities, filtering the diagnoses.
+     * Basic operation: scan the top-ranked diagnoses and keep those related to the stored features
+     * (the top 20 if a match is found there, otherwise up to the top 50).
+     *
+     * @param predict model output
+     * @return
+     */
+    public Map<String, Float> wrapAndFilter(float[][] predict) {
+        List<NameAndValue> nameAndValueList = new ArrayList<>();
+        for (int i = 0; i < predict[0].length; i++)
+            nameAndValueList.add(new NameAndValue(this.LABEL_DICT_ARRAY[i], predict[0][i]));
+        nameAndValueList.sort(Comparator.reverseOrder());  // sort by probability, descending
+
+        Map<String, Float> result = new HashMap<>();
+        Integer cnt = 0;
+        String diagnosis;
+        NameAndValue nameAndValue;
+        Map<String, Integer> relatedDiagnoses = null;
+        for (int i = 0; i < nameAndValueList.size(); i++) {
+            nameAndValue = nameAndValueList.get(i);
+            diagnosis = nameAndValue.getName();
+            for (String featureName : this.FEATURE_NAME_STORE) {
+                relatedDiagnoses = this.RELATED_DIAGNOSIS_DICT.get(featureName);
+                if (relatedDiagnoses != null && relatedDiagnoses.get(diagnosis) != null) {
+                    result.put(nameAndValue.getName(), nameAndValue.getValue());
+                    cnt += 1;
+                }
+            }
+            if ((i >= 20 && cnt > 0) || i >= 50)  // stop at the top 20 once related diagnoses are found, otherwise give up after the top 50
+                break;
+        }
+        return result;
+    }
+
+    /**
+     * Helper class for sorting
+     */
+    class NameAndValue implements Comparable<NameAndValue> {
+
+        private String name;
+        private Float value;
+
+        NameAndValue(String name, Float value) {
+            this.name = name;
+            this.value = value;
+        }
+
+        @Override
+        public int compareTo(NameAndValue o) {
+            if (this.value > o.getValue())
+                return 1;
+            else if (this.value.equals(o.getValue()))
+                return 0;
+            else
+                return -1;
+        }
+
+        public Float getValue() {
+            return value;
+        }
+
+        public String getName() {
+            return name;
+        }
     }
 
     /**
      * Wrap the model outputs for the caller
-     * 
+     *
      * @param predict model output
      * @return
      */
     public Map<String, Float> wrap(float[][] predict) {
+        if (this.doFilterDiagnosis)  // filter diagnoses
+            return this.wrapAndFilter(predict);
+        else
+            return this.basicWrap(predict);
+    }
+
+
+    /**
+     * Wrap the model outputs for the caller, without filtering
+     *
+     * @param predict model output
+     * @return
+     */
+    public Map<String, Float> basicWrap(float[][] predict) {
         Map<String, Float> result = new HashMap<>();
-        for (int i=0; i<predict[0].length; i++) {  // only the first row is returned
+        for (int i = 0; i < predict[0].length; i++) {  // only the first row is returned
             result.put(this.LABEL_DICT_ARRAY[i], predict[0][i]);
         }
         return result;
@@ -79,6 +174,15 @@ public abstract class NNDataSet {
         return this.NUM_FEATURE;
     }
 
+    /**
+     * Store the feature names
+     * @param features
+     */
+    public void storeFeatureNames(Map<String, Map<String, String>> features){
+        this.FEATURE_NAME_STORE.clear();
+        this.FEATURE_NAME_STORE.addAll(features.keySet());
+    }
+
     /**
      * @return
      */
@@ -86,4 +190,9 @@ public abstract class NNDataSet {
         return this.NUM_LABEL;
     }
 
+
+    public void setDoFilterDiagnosis(boolean doFilterDiagnosis) {
+        this.doFilterDiagnosis = doFilterDiagnosis;
+    }
+
 }

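The wrap() dispatch keeps existing callers unchanged; a sketch of the two paths through it (dataSet is any concrete NNDataSet; predict has the [1, NUM_LABEL] shape the class assumes):

    import java.util.Map;

    import org.algorithm.core.neural.dataset.NNDataSet;

    public class WrapDemo {
        public static void show(NNDataSet dataSet, float[][] predict) {
            dataSet.setDoFilterDiagnosis(false);
            Map<String, Float> all = dataSet.wrap(predict);      // basicWrap: every label and its probability

            dataSet.setDoFilterDiagnosis(true);                  // requires readFilterDiagnosisDict() beforehand
            Map<String, Float> related = dataSet.wrap(predict);  // wrapAndFilter: only feature-related diagnoses

            System.out.println(all.size() + " vs " + related.size());
        }
    }
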
+ 113 - 73
algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSetImpl.java

@@ -3,6 +3,7 @@ package org.algorithm.core.neural.dataset;
 import org.algorithm.util.TextFileReader;
 import org.diagbot.pub.utils.PropertiesUtil;
 
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -10,7 +11,7 @@ import java.util.Map.Entry;
 
 /**
  * Dataset for outpatient diagnosis push
- * 
+ *
  * @Author: bijl
  * @Date: 2018年7月26日-上午10:19:43
  * @Description:
@@ -22,9 +23,13 @@ public class NNDataSetImpl extends NNDataSet {
         super(modelAndVersion);
     }
 
-
     @Override
     public float[] toFeatureVector(Map<String, Map<String, String>> inputs) {
+
+        // newly added operations
+        this.reSplitWord(inputs);  // re-split words
+        this.storeFeatureNames(inputs);  // store the feature names
+
         float[] featureVector = new float[this.NUM_FEATURE];
 
         Iterator<Entry<String, Map<String, String>>> entries = inputs.entrySet().iterator();
@@ -32,13 +37,9 @@ public class NNDataSetImpl extends NNDataSet {
         String featureName = "";
         Integer position = -1;
         Integer negative = 0;
-        // Integer partbodyValue = 0;
         float positive_value = 1.0f;
         float negative_value = -1.0f;
         Map<String, String> featureValues = null;
-        // String partbody = null;
-        // String[] partbodys = null;
-        // String sn = null;
 
         /**
          * Data-scheme design
@@ -51,11 +52,6 @@ public class NNDataSetImpl extends NNDataSet {
             featureValues = entry.getValue();
             position = this.FEATURE_DICT.get(featureName);
             negative = NEGATIVE_DICT.get(featureValues.get("negative"));
-            // data scheme emphasizing the chief symptom
-            // sn = featureValues.get("sn");
-            // if("0".equals(sn)) {
-            // negative = negative * 10;
-            // }
 
             if (position != null)
                 if (negative == 1)
@@ -65,91 +61,36 @@ public class NNDataSetImpl extends NNDataSet {
                 else
                     System.out.println("New negative! This may lead to an error.");
 
-
-
-            /**
-             * Body-part-affiliated symptom representation scheme: partbodyValue = this.PARTBODY_DICT.get(featureValues.get("partbody"));
-             * if(partbodyValue != null) { value = 1.0f * partbodyValue /
-             * this.PARTBODY_DICT.get("NULL"); // body-part value encoding; value = (float)(Math.round(value *
-             * 100000))/100000; // keep 5 significant digits } value = negative * value; featureVector[position] =
-             * value;
-             * 
-             */
-
         }
 
         return featureVector;
     }
 
-
-    /**
-     * Read the dictionary
-     */
-//     @Override
-//     public void readDict(String modelAndVersion) {
-//    
-//     PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
-//     String model_version = prop.getProperty(modelAndVersion);
-//     model_version = model_version.trim();
-//    
-//     String url = "jdbc:mysql://192.168.2.235/diagbot-app?user=root&password=diagbot@20180822";
-//     MysqlConnector connector = new MysqlConnector(url);
-//     String querySql = "SELECT md._name, md._index, md.type_id " + "FROM model_dictionary AS md "
-//     + "WHERE md.belong_model = 'outpatient_model'";
-//    
-//     querySql = querySql.replace("outpatient_model", model_version);
-//     ResultSet rs = connector.query(querySql);
-//     try {
-//     while (rs.next()) {
-//     int type_id = rs.getInt("type_id");
-//     int _index = rs.getInt("_index");
-//     String _name = rs.getString("_name");
-//    
-//     if (type_id == 1)
-//     this.FEATURE_DICT.put(_name, _index);
-//     else if (type_id == 2)
-//     this.LABEL_DICT.put(_name, _index);
-//     else if (type_id == 8)
-//     this.NEGATIVE_DICT.put(_name, _index);
-//    
-//     }
-//    
-//     System.out.println("feature size:"+this.FEATURE_DICT.size());
-//    
-//     } catch (SQLException e) {
-//     e.printStackTrace();
-//     throw new RuntimeException("Failed to load the feature and label dictionaries");
-//     } finally {
-//     connector.close();
-//     }
-//    
-//     }
-
     @Override
     public void readDict(String modelAndVersion) {
-        
+
         PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
         String model_version = prop.getProperty(modelAndVersion);
 
         String filePath = prop.getProperty("basicPath");  // base directory
         filePath = filePath.substring(0, filePath.indexOf("model_version_replacement"));
-        
+
         filePath = filePath + "dictionaries.bin";  // 字典文件位置
-        
+
         List<String> lines = TextFileReader.readLines(filePath);
 
         boolean firstLine = true;
-        
+
         String[] temp = null;
         for (String line : lines) {
             if (firstLine) {  // skip the header line
                 firstLine = false;
                 continue;
             }
-            
+
             temp = line.split("\\|");
-            
-            if(temp[3].equals(model_version)){
+
+            if (temp[3].equals(model_version)) {
                 int type_id = Integer.parseInt(temp[2]);
                 int _index = Integer.parseInt(temp[1]);
                 String _name = temp[0];
@@ -168,4 +109,103 @@ public class NNDataSetImpl extends NNDataSet {
 
     }
 
+    /**
+     * Re-split words.
+     * Basic operation: if a term appears in the re-split dictionary, remove it
+     * and add the finer-grained terms it maps to.
+     *
+     * @param inputs input features
+     */
+    public void reSplitWord(Map<String, Map<String, String>> inputs) {
+        Iterator<Entry<String, Map<String, String>>> entries = inputs.entrySet().iterator();
+
+        String featureName = "";
+        String[] splitWords = null;
+        Map<String, String> featureValues = null;
+        Entry<String, Map<String, String>> entry;
+
+        Map<String, Map<String, String>> tempHashMap = new HashMap<>();  // 用于暂存key, value
+
+        while (entries.hasNext()) {
+            entry = entries.next();
+            featureName = entry.getKey();
+            if (this.FEATURE_DICT.get(featureName) == null  // not in the feature dictionary, so try re-splitting
+                    && this.RE_SPLIT_WORD_DICT.get(featureName) != null) {
+                entries.remove();  // remove the original term
+                splitWords = this.RE_SPLIT_WORD_DICT.get(featureName).split(",");
+                for (String word : splitWords) {  // add the finer-grained terms
+                    featureValues = new HashMap<>();
+                    featureValues.put("negative", "有"); // mark as a positive (present) term
+                    tempHashMap.put(word, featureValues);
+                }
+
+            }
+        }
+
+        inputs.putAll(tempHashMap);
+    }
+
+    @Override
+    public void readReSplitWordDict() {
+        PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
+        String filePath = prop.getProperty("basicPath");  // base directory
+        filePath = filePath.substring(0, filePath.indexOf("model_version_replacement"));
+
+        filePath = filePath + "re_split_word.bin";  // 字典文件位置
+
+        List<String> lines = TextFileReader.readLines(filePath);
+
+        boolean firstLine = true;
+
+        String[] temp = null;
+        Map<String, String> feature_map = null;
+        for (String line : lines) {
+            if (firstLine) {  // skip the header line
+                firstLine = false;
+                continue;
+            }
+
+            temp = line.split("\\|");
+
+            this.RE_SPLIT_WORD_DICT.put(temp[0], temp[1]);
+
+        }
+
+        System.out.println("再分词,词条数:" + this.RE_SPLIT_WORD_DICT.size());
+
+    }
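For reference, readReSplitWordDict expects re_split_word.bin to be pipe-delimited with a header line, the second column holding comma-separated sub-terms; the sub-terms below are hypothetical:

    word|sub_words
    幽门螺杆菌感染|幽门螺杆菌,感染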
+
+    @Override
+    public void readFilterDiagnosisDict() {
+        PropertiesUtil prop = new PropertiesUtil("/algorithm.properties");
+        String filePath = prop.getProperty("basicPath");  // base directory
+        filePath = filePath.substring(0, filePath.indexOf("model_version_replacement"));
+
+        filePath = filePath + "filter_diagnoses.bin";  // 字典文件位置
+
+        List<String> lines = TextFileReader.readLines(filePath);
+
+        boolean firstLine = true;
+
+        String[] temp = null;
+        String[] diagnoses = null;
+        Map<String, Integer> diagnosis_map = null;
+        for (String line : lines) {
+            if (firstLine) {  // skip the header line
+                firstLine = false;
+                continue;
+            }
+
+            temp = line.split("\\|");
+            diagnoses = temp[1].split("_");
+            diagnosis_map = new HashMap<>();
+            for (String diagnosis: diagnoses)
+                diagnosis_map.put(diagnosis, 1);
+            this.RELATED_DIAGNOSIS_DICT.put(temp[0], diagnosis_map);
+        }
+
+        System.out.println("疾病过滤字典大小:" + this.RELATED_DIAGNOSIS_DICT.size());
+    }
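Likewise, each filter_diagnoses.bin line after the header maps a feature to the diagnoses it is allowed to support, joined by `_`; the second entry below is hypothetical:

    feature|related_diagnoses
    上腹压痛|胃肠炎
    发热|胃肠炎_肺癌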
+
+
 }

+ 11 - 1
algorithm/src/main/java/org/algorithm/core/neural/dataset/NNDataSetImplNonParallel.java

@@ -22,7 +22,17 @@ public class NNDataSetImplNonParallel extends NNDataSet {
         super(modelAndVersion);
     }
 
-    
+
+    @Override
+    public void readReSplitWordDict() {
+
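+        // no-op: word re-splitting is not implemented for the non-parallel dataset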
+    }
+
+    @Override
+    public void readFilterDiagnosisDict() {
+
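+        // no-op: diagnosis filtering is not implemented for the non-parallel dataset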
+    }
+
     @Override
     public float[] toFeatureVector(Map<String, Map<String, String>> inputs) {
         // inputs {症状名:{partbody:部位名, property:属性名, duration:时间类别, sex:性别值, age:年龄值}

+ 33 - 0
algorithm/src/main/java/org/algorithm/factory/RelationExtractionFactory.java

@@ -0,0 +1,33 @@
+package org.algorithm.factory;
+
+import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
+import org.algorithm.core.cnn.model.RelationExtractionEnsembleModel;
+
+/**
+ * @Description:
+ * @Author: HUJING
+ * @Date: 2019/9/10 15:25
+ */
+public class RelationExtractionFactory {
+    private static RelationExtractionEnsembleModel relationExtractionEnsembleModelInstance = null;
+
+    public static synchronized AlgorithmCNNExecutorPacs getInstance() {
+        // synchronized lazy init, so concurrent callers cannot create two model instances;
+        // an unsynchronized null check on a local copy would not guarantee a single instance
+        if (relationExtractionEnsembleModelInstance == null) {
+            relationExtractionEnsembleModelInstance = new RelationExtractionEnsembleModel();
+        }
+        return relationExtractionEnsembleModelInstance;
+    }
+}
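A minimal usage sketch; the sample sentence is reused from ReEnsembleModelTest below, and the empty candidate list is only for brevity (real callers build triads as in RelationExtractionUtil.createTriad):

    import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
    import org.algorithm.core.cnn.entity.Triad;
    import org.algorithm.factory.RelationExtractionFactory;

    import java.util.ArrayList;
    import java.util.List;

    public class RelationExtractionFactoryDemo {
        public static void main(String[] args) {
            // repeated calls hand back the same cached ensemble model
            AlgorithmCNNExecutorPacs executor = RelationExtractionFactory.getInstance();
            List<List<String>> relations = executor.execute("患者剧烈胸痛头痛失眠不安", new ArrayList<Triad>());
            System.out.println(relations);
        }
    }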

+ 5 - 3
algorithm/src/main/java/org/algorithm/test/ReEnsembleModelTest.java

@@ -18,7 +18,7 @@ public class ReEnsembleModelTest {
 
     public static void main(String[] args) {
         RelationExtractionEnsembleModel ensembleModel = new RelationExtractionEnsembleModel();
-
+        List<List<String>> result = new ArrayList<>();
         List<Triad> triads = new ArrayList<>();
         Triad triad_1 = new Triad();
         Lemma l_1 = new Lemma();
@@ -36,9 +36,11 @@ public class ReEnsembleModelTest {
 
         long start = System.nanoTime();
         for (int i=0; i<200; i++)  // 重复100次
-            triads = ensembleModel.execute("患者剧烈胸痛头痛失眠不安", triads);
+        {
+            result = ensembleModel.execute("患者剧烈胸痛头痛失眠不安", triads);
+        }
         long elapsedTime = System.nanoTime() - start;
-        System.out.println(triads.size());
+        System.out.println(result.size());
         System.out.println(elapsedTime);
     }
 }

+ 9 - 4
algorithm/src/main/java/org/algorithm/test/TensorflowExcutorTest.java

@@ -1,5 +1,6 @@
 package org.algorithm.test;
 
+import org.algorithm.core.neural.DiagnosisPredictExecutor;
 import org.algorithm.core.neural.SymptomPredictExecutor;
 import org.algorithm.util.Utils;
 
@@ -13,9 +14,9 @@ public class TensorflowExcutorTest {
         
         //TODO:change VitalPredictExcutor to test different executors
 //        VitalPredictExecutor excutor = new VitalPredictExecutor();
-        SymptomPredictExecutor excutor = new SymptomPredictExecutor();
+//        SymptomPredictExecutor excutor = new SymptomPredictExecutor();
 //        LisPredictExecutor excutor = new LisPredictExecutor();
-//        DiagnosisPredictExecutor excutor = new DiagnosisPredictExecutor();
+        DiagnosisPredictExecutor excutor = new DiagnosisPredictExecutor();
 //        PacsPredictExecutor excutor = new PacsPredictExecutor();
 //        DiagnosisToLisExecutor excutor = new DiagnosisToLisExecutor();
 //        DiagnosisToPacsExecutor excutor = new DiagnosisToPacsExecutor();
@@ -75,7 +76,11 @@ public class TensorflowExcutorTest {
         featureValues.put("age", "34");
         featureValues.put("negative", "有");
         featureValues.put("sn", "0");
-        aMap.put("踝关节疼痛", featureValues);
+
+        aMap.put("上臂远端疼痛", featureValues);
+        aMap.put("上肢远端青紫", featureValues);
+        aMap.put("肘部肿胀", featureValues);
+        aMap.put("外伤", featureValues);
 //        aMap.put("心悸", featureValues);
 //        aMap.put("气急", featureValues);
 //        aMap.put("头痛", featureValues);
@@ -87,7 +92,7 @@ public class TensorflowExcutorTest {
 //        for (Entry<String, Float> entry : result.entrySet()) {
 //            System.out.println(entry.getKey() + " : " + entry.getValue());
 //        }
-//        System.out.println(result);
+        System.out.println(result);
         Utils.top_k(10, result);
 
     }

+ 47 - 40
algorithm/src/main/java/org/algorithm/test/Test.java

@@ -1,49 +1,56 @@
 package org.algorithm.test;
 
+import java.util.*;
 
 public class Test {
-    
+
+
     public static void main(String[] args) {
-        
-//        Integer aa = new Integer(53);
-//        Integer bb = new Integer(954);
-//        float xx = 1.0f;
-//        for(int i=1; i< 955; i++) {
-//            xx = (float)(Math.round(1.0f * i / bb*100000))/100000;
-//            System.out.println(i+":"+xx);
-////        }
-//        String filePath = "/opt/models/model_version_replacement/model";
-//        int index = filePath.indexOf("model_version_replacement");
-//
-//        System.out.println(filePath.substring(0, index));
-//            public static void testJSONStrToJavaBeanObj(){
-//
-//        Student student = JSON.parseObject(JSON_OBJ_STR, new TypeReference<Student>() {});
-//        //Student student1 = JSONObject.parseObject(JSON_OBJ_STR, new TypeReference<Student>() {});//因为JSONObject继承了JSON,所以这样也是可以的
-//
-//        System.out.println(student.getStudentName()+":"+student.getStudentAge());
-//
-        String JSON_ARRAY_STR = "[{\"length\":4,\"offset\":0,\"property\":\"1\",\"text\":\"剑突下痛\",\"threshold\":0.0},{\"length\":2,\"offset\":4,\"property\":\"1\",\"text\":\"胀痛\",\"threshold\":0.0},{\"length\":2,\"offset\":6,\"property\":\"2\",\"text\":\"1天\",\"threshold\":0.0},{\"length\":1,\"offset\":8,\"text\":\",\",\"threshold\":0.0}]\n";
-//        JSONArray jsonArray = JSONArray.parseArray(JSON_ARRAY_STR);
-////        String jsonString = "{\"length\":4,\"offset\":0,\"property\":\"1\",\"text\":\"剑突下痛\",\"threshold\":0.0}";
-//
-//       for (int i = 0; i < jsonArray.size(); i++){
-//           JSONObject job = jsonArray.getJSONObject(i);
-//           LemmaInfo info = JSON.parseObject(job.toJSONString(), new TypeReference<LemmaInfo>() {});
-//           //Student student1 = JSONObject.parseObject(JSON_OBJ_STR, new TypeReference<Student>() {});//因为JSONObject继承了JSON,所以这样也是可以的
-//
-//           System.out.println(info.getLength()+":"+info.getText());
-//       }
-
-        int index = 0;
-        for (int i=0; i<5; i++)
-            for (int j = i+1; j< 6; j++){
-                System.out.println(i + "," + j);
-                index ++;
-            }
-
-        System.out.println(index);
+        List<Integer> data = new ArrayList<>();
+        data.add(1);
+        data.add(3);
+        data.add(5);
+        data.add(7);
+        Test t = new Test();
+
+        List<List<Integer>> workSpace = new ArrayList<>();
+        for (int i = 1; i < data.size(); i++) {
+            t.combinerSelect(data, new ArrayList<>(), workSpace, data.size(), i);
+        }
+
+        System.out.println(workSpace);
 
     }
 
+    /**
+     * Combination generator
+     *
+     * @param data      original data
+     * @param workSpace temporary buffer holding the current partial combination
+     * @param result    collects every complete combination of size k
+     * @param n         the n in C(n,k) (currently unused)
+     * @param k         the k in C(n,k)
+     */
+    public <E> void combinerSelect(List<E> data, List<E> workSpace, List<List<E>> result, int n, int k) {
+        List<E> copyData;
+        List<E> copyWorkSpace = null;
+
+        if (workSpace.size() == k) {
+            for (E c : workSpace)
+                System.out.print(c);
+
+            result.add(new ArrayList<>(workSpace));
+            System.out.println();
+            return;  // complete combination reached; deeper recursion cannot yield another of size k
+        }
+
+        for (int i = 0; i < data.size(); i++) {
+            copyData = new ArrayList<E>(data);
+            copyWorkSpace = new ArrayList<E>(workSpace);
+
+            copyWorkSpace.add(copyData.get(i));
+            for (int j = i; j >= 0; j--)
+                copyData.remove(j);
+            combinerSelect(copyData, copyWorkSpace, result, n, k);
+        }
+    }
+
 }
+
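With the driver above (data = [1, 3, 5, 7], k running from 1 to 3), the enumeration can be checked by hand:

    // k = 1: [1] [3] [5] [7]                      -> C(4,1) = 4
    // k = 2: [1,3] [1,5] [1,7] [3,5] [3,7] [5,7]  -> C(4,2) = 6
    // k = 3: [1,3,5] [1,3,7] [1,5,7] [3,5,7]      -> C(4,3) = 4
    // results therefore collects 14 combinations in total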

+ 46 - 0
algorithm/src/main/java/org/algorithm/test/TestDiagnosisFilter.java

@@ -0,0 +1,46 @@
+package org.algorithm.test;
+
+import org.algorithm.core.neural.dataset.NNDataSetImpl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @Author: bijl
+ * @Date: 2019/9/23 10:49
+ * @Description:
+ */
+public class TestDiagnosisFilter {
+
+    public static void main(String[] args) {
+        NNDataSetImpl dataSet = new NNDataSetImpl("diagnosisPredict.version");
+
+        dataSet.readFilterDiagnosisDict();  // load the filter table
+//        鼻炎|0|2|outpatient_556_IOE_1
+//        肺癌|1|2|outpatient_556_IOE_1
+//        胃肠炎|2|2|outpatient_556_IOE_1
+//        屈光不正|3|2|outpatient_556_IOE_1
+        // construction: look up the diseases related to outpatient_556_IOE_1 in dictionaries.bin, formatted as above
+        float[][] predict = {{0.1f, 0.2f, 0.3f, 0.4f}};
+
+        // build the input
+        Map<String, Map<String, String>> inputs = new HashMap<>();
+        Map<String, String> featureMap = new HashMap<>();
+        featureMap.put("negative", "有");
+        featureMap.put("property", "11");
+
+        // construction: look up one or more features related to the above diseases in filter_diagnoses.bin, and add them
+        inputs.put("上腹压痛", featureMap);  // "上腹压痛" is related only to 胃肠炎
+        // 保存输入
+        dataSet.storeFeatureNames(inputs);
+
+        // filter the diseases
+        Map<String, Float> result = dataSet.wrapAndFilter(predict);
+        Map<String, Float> result_no_filter = dataSet.basicWrap(predict);
+
+        System.out.println("无疾病过滤:" + result_no_filter);  // 期望输出 {鼻炎=0.1, 肺癌=0.2, 胃肠炎=0.3, 屈光不正=0.4}
+        System.out.println("疾病过滤:" + result);  // 期望输出{胃肠炎=0.3}
+
+
+    }
+}

+ 34 - 0
algorithm/src/main/java/org/algorithm/test/TestReSplit.java

@@ -0,0 +1,34 @@
+package org.algorithm.test;
+
+import org.algorithm.core.neural.dataset.NNDataSetImpl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Tests word re-splitting
+ * @Author: bijl
+ * @Date: 2019/9/23 10:46
+ * @Description:
+ */
+public class TestReSplit {
+
+    public static void main(String[] args) {
+
+        NNDataSetImpl dataSet = new NNDataSetImpl("diagnosisPredict.version");
+
+        // 构造输入
+        Map<String, Map<String, String>> inputs = new HashMap<>();
+
+        Map<String, String> featureMap = new HashMap<>();
+        featureMap.put("negative", "有");
+        featureMap.put("property", "11");
+
+        inputs.put("幽门螺杆菌感染", featureMap);
+
+        // compare the data before and after re-splitting
+        System.out.println("original data: " + inputs);
+        dataSet.reSplitWord(inputs);
+        System.out.println("data after re-splitting: " + inputs);
+    }
+}

+ 15 - 0
algorithm/src/main/java/org/algorithm/test/TestRelationTreeUtils.java

@@ -0,0 +1,15 @@
+package org.algorithm.test;
+
+import org.algorithm.core.RelationTreeUtils;
+
+/**
+ * @Author: bijl
+ * @Date: 2019/9/5 17:07
+ * @Description:
+ */
+public class TestRelationTreeUtils {
+
+    public static void main(String[] args) {
+        RelationTreeUtils.test();
+    }
+}

Diff not shown because the file is too large
+ 140 - 0
algorithm/src/main/java/org/algorithm/test/TestRuleCheckMachine.java


+ 1 - 1
algorithm/src/main/java/org/algorithm/util/MysqlConnector.java

@@ -45,7 +45,7 @@ public class MysqlConnector {
     
     /**
      * execute a batch of sql statements
-     * @param sql
+     * @param sqls
      */
     public void executeBatch(List<String> sqls) {
         Statement stmt = null;

+ 1 - 1
algorithm/src/main/resources/algorithm.properties

@@ -2,7 +2,7 @@
 
 #basicPath=E:/project/push/algorithm/src/main/models/model_version_replacement/model
 basicPath=/opt/models/dev/models/model_version_replacement/model
-#basicPath=E:/xxx/model_version_replacement/model
+#basicPath=F:/models/model_version_replacement/model
 
 ############################### current model version ################################
 diagnosisPredict.version=outpatient_556_IOE_1

+ 0 - 6
common-push/pom.xml

@@ -28,12 +28,6 @@
             <artifactId>nlp</artifactId>
             <version>1.0.0</version>
         </dependency>
-
-        <dependency>
-            <groupId>org.diagbot</groupId>
-            <artifactId>common-service</artifactId>
-            <version>1.0.0</version>
-        </dependency>
     </dependencies>
 
     <build>

+ 2 - 0
common-push/src/main/java/org/diagbot/common/push/bean/SearchData.java

@@ -1,5 +1,7 @@
 package org.diagbot.common.push.bean;
 
+import org.diagbot.nlp.rule.module.PreResult;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;

+ 33 - 0
common-push/src/main/java/org/diagbot/common/push/cache/ApplicationCacheUtil.java

@@ -9,8 +9,10 @@ import org.diagbot.nlp.util.NlpCache;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class ApplicationCacheUtil {
 
@@ -24,6 +26,8 @@ public class ApplicationCacheUtil {
     public static Map<String, List<Rule>> kl_rule_filter_map = null;
     //critical-value alerts
     public static Map<String, RuleApp> kl_rule_app_filter_map = null;
+    //pacs relation-extraction filter
+    public static Map<String, Map<String, String>> kl_diagnose_detail_filter_map = null;
 
     public static Map<String, Map<String, String>> getStandard_info_synonym_map() {
         if (standard_info_synonym_map == null) {
@@ -154,4 +158,33 @@ public class ApplicationCacheUtil {
             }
         }
     }
+
+    public static Map<String, Map<String, String>> getKl_diagnose_detail_filter_map() {
+        if (kl_diagnose_detail_filter_map == null) {
+            create_kl_diagnose_detail_filter_map();
+        }
+        return kl_diagnose_detail_filter_map;
+    }
+
+    public static void create_kl_diagnose_detail_filter_map() {
+        kl_diagnose_detail_filter_map = new HashMap<>();
+        Set<String> diagnoseDetailRelation = new HashSet<>();
+        Configuration configuration = new DefaultConfig();
+        List<String> fileContents = configuration.readFileContents("bigdata_diagnose_detail_filter.dict");
+        for (String line : fileContents) {
+            String[] content = line.split("\\|", -1);
+            String[] relations = content[1].split("、");
+            for (String relation : relations) {
+                if (diagnoseDetailRelation.add(relation)) {
+                    Map<String, String> diagnoseDetailRelationMap = kl_diagnose_detail_filter_map.get(content[0]);
+                    if (diagnoseDetailRelationMap == null) {
+                        // one map per type key; sharing a single map instance across keys would alias them
+                        diagnoseDetailRelationMap = new HashMap<>();
+                        kl_diagnose_detail_filter_map.put(content[0], diagnoseDetailRelationMap);
+                    }
+                    diagnoseDetailRelationMap.put(relation, relation);
+                }
+            }
+        }
+    }
 }
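For reference, each line of bigdata_diagnose_detail_filter.dict decrypts to `type|relation1、relation2、...` as written by CacheFileManager below; the relation text here is hypothetical:

    4|两肺纹理增多、两肺纹理增粗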

+ 14 - 2
common-push/src/main/java/org/diagbot/common/push/cache/CacheFileManager.java

@@ -249,7 +249,7 @@ public class CacheFileManager {
             }
             fw.close();
             //疾病科室信息
-            sql = "SELECT k1.lib_name diag_name, k2.lib_name dept_name FROM kl_concept_common kcc, kl_concept k1, kl_concept k2 " +
+            sql = "SELECT k1.lib_name diag_name, k2.lib_name dept_name FROM kl_disease kcc, kl_concept k1, kl_concept k2 " +
                     "where kcc.concept_id = k1.id and kcc.dept_id = k2.id " +
                     "and k1.lib_type = 18 and kcc.dept_id  is not null";
             st = conn.createStatement();
@@ -293,7 +293,7 @@ public class CacheFileManager {
         try {
             EncrypDES encrypDES = new EncrypDES();
             //疾病科室
-            String sql = "SELECT k1.lib_name diag_name, k2.lib_name dept_name FROM kl_concept_common kcc, kl_concept k1, kl_concept k2 " +
+            String sql = "SELECT k1.lib_name diag_name, k2.lib_name dept_name FROM kl_disease kcc, kl_concept k1, kl_concept k2 " +
                     "where kcc.concept_id = k1.id and kcc.dept_id = k2.id " +
                     "and k1.lib_type = 18 and kcc.dept_id  is not null";
             st = conn.createStatement();
@@ -398,6 +398,18 @@ public class CacheFileManager {
                 fw.write("\n");
             }
             fw.close();
+
+            sql = "SELECT type,relation FROM `kl_diagnose_detail` WHERE type = 4 AND LENGTH(relation) > 0 GROUP BY relation";
+            st = conn.createStatement();
+            rs = st.executeQuery(sql);
+            fw = new FileWriter(path + "bigdata_diagnose_detail_filter.dict");
+            while (rs.next()) {
+                r1 = String.valueOf(rs.getInt(1));
+                r2 = rs.getString(2);
+                fw.write(encrypDES.encrytor(r1+ "|" + r2));
+                fw.write("\n");
+            }
+            fw.close();
         } catch (IOException ioe) {
             ioe.printStackTrace();
         } catch (SQLException sqle) {

+ 14 - 2
common-push/src/main/java/org/diagbot/common/push/filter/ClassifyDiag.java

@@ -91,7 +91,7 @@ public class ClassifyDiag {
                 String desc = featureRate.getDesc();
                 Map<String,Object> d = new HashMap<>();
                 if(desc != null){
-                    JSONObject jsonObject = JSONObject.parseObject(desc);
+                   /* JSONObject jsonObject = JSONObject.parseObject(desc);
                     d = jsonObject;
                     if(d.keySet().size() == 1 && "警惕".equals(d.keySet().toArray()[0])){
                         highDiagList.add(featureName);
@@ -100,7 +100,12 @@ public class ClassifyDiag {
                         diffDiagList.add(featureName);
                     }else {
                         queDiagList.add(featureName);
-                    }
+                    }*/
+                   if(desc.contains("确诊") || desc.contains("拟诊")){
+                       queDiagList.add(featureName);
+                   }else {
+                       highDiagList.add(featureName);
+                   }
                 }else {
                     bigDiagList.add(featureName);
                 }
@@ -573,6 +578,13 @@ public class ClassifyDiag {
                     if(key != null && key.size()>0){
                         for (Object o:key) {
                             classifySet.add(o.toString());
+                            List<Object> key1 = this.getKey(diagClassifyCache, o.toString());
+                            if(key1 != null && key1.size()>0){
+                                for (Object f:key1
+                                     ) {
+                                    classifySet.add(f.toString());
+                                }
+                            }
                         }
                     }
                 }

+ 0 - 59
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentLis.java

@@ -1,59 +0,0 @@
-package org.diagbot.common.push.filter.pretreat;
-
-import org.diagbot.common.push.bean.PreResult;
-import org.diagbot.nlp.participle.word.Lexeme;
-import org.diagbot.nlp.participle.word.LexemePath;
-import org.diagbot.nlp.util.NegativeEnum;
-import org.diagbot.nlp.util.NlpUtil;
-
-import java.lang.reflect.Array;
-import java.util.List;
-
-public class PretreatmentLis extends Pretreatment {
-    private String join_symbols = ";:;:";
-
-    public List<PreResult> analyze(String content) throws java.io.IOException{
-        return super.analyzeDefault(content);
-    }
-
-    public PreResult createPreResult(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
-        PreResult result = new PreResult();
-        double value = findNumberValue(lexemes, lexeme, index);
-        if (value == -1) return null;
-        //继续往前找化验明细项
-        if (cursor > 0) cursor--;
-        Lexeme leftLexeme = lexemes.get(cursor);
-        if (join_symbols.contains(leftLexeme.getText())) {
-            if (cursor > 0) {
-                cursor--;
-                leftLexeme = lexemes.get(cursor);
-            } else {
-                return null;
-            }
-        }
-        if (NlpUtil.isFeature(leftLexeme.getProperty(), new NegativeEnum[]{NegativeEnum.LIS_NAME})) {
-            result.setDetailName(NlpUtil.concept(leftLexeme, NegativeEnum.LIS_NAME));
-        } else if (NlpUtil.isFeature(leftLexeme.getProperty(), new NegativeEnum[]{NegativeEnum.PUB_NAME})) {
-            result.setUniqueName(NlpUtil.concept(leftLexeme, NegativeEnum.PUB_NAME));
-        } else {
-            return null;
-        }
-        //查找化验套餐
-        int position = cursor - 1;
-        while (position > -1) {
-            leftLexeme = lexemes.get(position);
-            if (NlpUtil.isFeature(leftLexeme.getProperty(), new NegativeEnum[]{NegativeEnum.LIS_TYPE})) {
-                result.setName(NlpUtil.concept(leftLexeme, NegativeEnum.LIS_TYPE));
-                break;
-            }
-            position--;
-        }
-        result.setValue(String.valueOf(value));
-        result.setUnits(lexeme.getText());
-        return result;
-    }
-
-    public String findBodyValue(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
-        return null;
-    }
-}

+ 0 - 51
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentPacs.java

@@ -1,51 +0,0 @@
-package org.diagbot.common.push.filter.pretreat;
-
-import org.diagbot.common.push.bean.PreResult;
-import org.diagbot.nlp.participle.ParticipleUtil;
-import org.diagbot.nlp.participle.word.Lexeme;
-import org.diagbot.nlp.participle.word.LexemePath;
-import org.diagbot.nlp.util.NegativeEnum;
-import org.diagbot.nlp.util.NlpUtil;
-
-import java.util.List;
-
-public class PretreatmentPacs extends Pretreatment {
-    protected NegativeEnum[] nees_pacs_result = new NegativeEnum[]{NegativeEnum.PACS_RESULT};
-    protected NegativeEnum[] nees_pacs_name = new NegativeEnum[]{NegativeEnum.PACS_NAME};
-    public List<PreResult> analyze(String content) throws java.io.IOException{
-        List<PreResult> preResultList = super.analyzeDefault(content);
-        //pacs除了数值型需要转, 还需要对部分检查结果提取,以便做危机警示
-        LexemePath<Lexeme> lexemes = ParticipleUtil.participle(content);
-
-        Lexeme leftLexeme;
-        for (int i = 0; i < lexemes.size(); i++) {
-            Lexeme l = lexemes.get(i);
-            if (NlpUtil.isFeature(l.getProperty(), nees_pacs_result) && i > 0) {
-                int c = i - 1;
-                while (c > -1) {
-                    leftLexeme = lexemes.get(c);
-                    if (NlpUtil.isFeature(leftLexeme.getProperty(), nees_pacs_name)) {
-                        PreResult result = new PreResult();
-                        result.setValue(NlpUtil.concept(l, NegativeEnum.PACS_RESULT));
-                        result.setDetailName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
-                        result.setUniqueName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
-                        preResultList.add(result);
-                        break;
-                    }
-                    c--;
-                }
-
-            }
-        }
-
-        return preResultList;
-    }
-
-    public PreResult createPreResult(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
-        return super.createDefaultPreResult(lexemes, lexeme, index);
-    }
-
-    public String findBodyValue(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
-        return null;
-    }
-}

+ 10 - 11
common-push/src/main/java/org/diagbot/common/push/filter/rule/PretreatmentRule.java

@@ -1,13 +1,11 @@
 package org.diagbot.common.push.filter.rule;
 
 
-import org.diagbot.common.push.bean.PreResult;
-import org.diagbot.common.push.bean.ResponseData;
 import org.diagbot.common.push.bean.Rule;
 import org.diagbot.common.push.bean.SearchData;
 import org.diagbot.common.push.cache.ApplicationCacheUtil;
-import org.diagbot.common.push.filter.pretreat.*;
-import org.diagbot.pub.Constants;
+import org.diagbot.nlp.rule.module.PreResult;
+import org.diagbot.nlp.rule.pretreat.*;
 import org.springframework.util.StringUtils;
 
 import java.io.IOException;
@@ -63,13 +61,6 @@ public class PretreatmentRule {
         return add2PreResultList(preResultList, content, ruleType, searchData);
     }
 
-    public static void main(String[] args) throws IOException {
-        PretreatmentRule pretreatmentRule = new PretreatmentRule();
-        SearchData searchData = new SearchData();
-        searchData.setSymptom("钠(Na)110mmol/L");
-        pretreatmentRule.rule(searchData);
-    }
-
     private String add2PreResultList(List<PreResult> preResultList, String content, String ruleType, SearchData searchData) throws java.io.IOException {
         Map<String, List<Rule>> kl_rule_filter_map = ApplicationCacheUtil.getKl_rule_filter_map();
         //符合条件的规则
@@ -79,6 +70,10 @@ public class PretreatmentRule {
             for (PreResult result : preResultList) {
                 //规则库中匹配
                 if (kl_rule_filter_map.get(result.getUniqueName()) != null) {
+                    //structured input stores non-numeric values in otherValue; copy it into value
+                    if(!StringUtils.isEmpty(result.getOtherValue())) {
+                        result.setValue(result.getOtherValue());
+                    }
                     List<Rule> rules = kl_rule_filter_map.get(result.getUniqueName());
                     if (rules == null) {
                         continue;
@@ -99,6 +94,10 @@ public class PretreatmentRule {
                             content = content + (rule.getRemind() == null ? "" : rule.getRemind());
                         }
                     }
+                    //restore the original value
+                    if(!StringUtils.isEmpty(result.getOtherValue())) {
+                        result.setValue(null);
+                    }
                 }
             }
 

+ 22 - 0
common-push/src/main/java/org/diagbot/common/push/util/PushConstants.java

@@ -1,5 +1,8 @@
 package org.diagbot.common.push.util;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * @ClassName org.diagbot.bigdata.util.BigDataConstants
  * @Description TODO
@@ -38,4 +41,23 @@ public class PushConstants {
     public final static String result_mapping_vital = "resultMappingVitalMap";          //推送体征结果名称映射
     public final static String result_mapping_diag = "resultMappingDiagMap";          //推送疾病科室名称映射
     public final static String result_mapping_filter = "resultMappingFilterMap";          //推送结果年龄 性别过滤
+
+    //relation extraction: maps property_id to property_name
+    public final static Map<String,String> featureTypeMap = new HashMap<String,String>(){{
+        put("80","辅检其他");
+        put("9","单位");
+        put("2","时间");
+        put("3","部位");
+        put("7","反意或虚拟");
+        put("16","辅检项目");
+        put("17","辅检结果");
+        put("81","属性");
+        put("82","方位");
+        put("83","形容词");
+        put("84","局部结构");
+        put("85","属性值");
+        put("86","表现");
+        put("28","字母与数值");
+        put("87","正常表现");
+    }};
 }
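A sketch of how the map is consumed when lemmas are built for the relation model (mirroring RelationExtractionUtil.createTriad below); the lemma and lexeme variables are assumed to come from a participle loop:

    // the participle property id selects the label the model expects,
    // e.g. "16" -> "辅检项目", "17" -> "辅检结果"
    lemma.setProperty(PushConstants.featureTypeMap.get(lexeme.getProperty()));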

+ 13 - 21
common-push/src/main/java/org/diagbot/common/push/work/ParamsDataProxy.java

@@ -1,5 +1,8 @@
 package org.diagbot.common.push.work;
 
+import org.algorithm.core.cnn.AlgorithmCNNExecutor;
+import org.algorithm.core.cnn.AlgorithmCNNExecutorPacs;
+import org.algorithm.factory.RelationExtractionFactory;
 import org.apache.commons.lang3.StringUtils;
 import org.diagbot.common.push.bean.SearchData;
 import org.diagbot.common.push.util.PushConstants;
@@ -10,9 +13,7 @@ import org.diagbot.nlp.util.NegativeEnum;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.servlet.http.HttpServletRequest;
 import java.util.*;
-import java.util.regex.Pattern;
 
 /**
  * @ClassName org.diagbot.bigdata.work.ParamsDataProxy
@@ -23,25 +24,6 @@ import java.util.regex.Pattern;
  **/
 public class ParamsDataProxy {
     Logger logger = LoggerFactory.getLogger(ParamsDataProxy.class);
-    //标准词只处理的词性
-    public static NegativeEnum[] negativeEnums = new NegativeEnum[] { NegativeEnum.VITAL_INDEX, NegativeEnum.SYMPTOM
-            , NegativeEnum.DIGITS, NegativeEnum.EVENT_TIME, NegativeEnum.UNIT, NegativeEnum.DIAG_STAND
-            , NegativeEnum.OTHER};
-    //标准词处理的三元组
-    public static NegativeEnum[][] negativeEnumTriple = {
-            { NegativeEnum.VITAL_INDEX, NegativeEnum.DIGITS, NegativeEnum.UNIT },
-            { NegativeEnum.VITAL_INDEX, NegativeEnum.DIGITS, NegativeEnum.EVENT_TIME },
-            { NegativeEnum.SYMPTOM, NegativeEnum.DIGITS, NegativeEnum.UNIT },
-            { NegativeEnum.SYMPTOM, NegativeEnum.DIGITS, NegativeEnum.EVENT_TIME },
-            { NegativeEnum.DIAG_STAND, NegativeEnum.DIGITS, NegativeEnum.UNIT },
-            { NegativeEnum.DIAG_STAND, NegativeEnum.DIGITS, NegativeEnum.EVENT_TIME },
-            { NegativeEnum.DIAG_STAND, NegativeEnum.DIGITS, NegativeEnum.OTHER }
-    };
-    //标准词处理的二元组
-    public static NegativeEnum[][] negativeEnumTwoTuple = {
-            { NegativeEnum.VITAL_INDEX, NegativeEnum.DIGITS },
-            { NegativeEnum.SYMPTOM, NegativeEnum.DIGITS }
-    };
 
     public void createNormalInfo(SearchData searchData) throws Exception {
         //计算年龄区间
@@ -138,6 +120,16 @@ public class ParamsDataProxy {
             featuresList = fa.start(searchData.getDiag(), FeatureType.DIAG);
             paramFeatureInit(searchData, featuresList);
         }
+        if (!StringUtils.isEmpty(searchData.getPacs())) {
+            //relation-extraction model
+            AlgorithmCNNExecutorPacs algorithmCNNExecutor = RelationExtractionFactory.getInstance();
+            RelationExtractionUtil re = new RelationExtractionUtil();
+            //raw pacs participle result
+            List<List<String>> execute = algorithmCNNExecutor.execute(searchData.getPacs(), re.createTriad(searchData));
+            if (execute != null && execute.size() > 0) {
+                re.addToSearchDataInputs(execute, searchData);
+            }
+        }
     }
 
     /**

+ 92 - 0
common-push/src/main/java/org/diagbot/common/push/work/RelationExtractionUtil.java

@@ -0,0 +1,92 @@
+package org.diagbot.common.push.work;
+
+import org.algorithm.core.cnn.entity.Lemma;
+import org.algorithm.core.cnn.entity.Triad;
+import org.diagbot.common.push.bean.SearchData;
+import org.diagbot.common.push.cache.ApplicationCacheUtil;
+import org.diagbot.common.push.util.PushConstants;
+import org.diagbot.nlp.participle.ParticipleUtil;
+import org.diagbot.nlp.participle.word.Lexeme;
+import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.util.Constants;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @Description:
+ * @Author: HUJING
+ * @Date: 2019/9/9 17:30
+ */
+public class RelationExtractionUtil {
+    public List<Triad> createTriad(SearchData searchData) throws IOException {
+        List<Triad> triads = new ArrayList<>();
+        String[] pacsSplits = searchData.getPacs().trim().split("。|\n");
+        List<Lemma> lemmaList = new ArrayList<>();
+        Lemma lemma = null;
+        for (String pacsSplit : pacsSplits) {
+            LexemePath<Lexeme> pacsLexemes = ParticipleUtil.participlePacs(pacsSplit);
+            for (int i = 0; i < pacsLexemes.size(); i++) {
+                //skip non-medical words
+                if (Constants.word_property_other.equals(pacsLexemes.get(i).getProperty())) {
+                    continue;
+                }
+                lemma = new Lemma();
+                lemma.setText(pacsLexemes.get(i).getText());
+                lemma.setPosition(String.valueOf(pacsLexemes.get(i).getOffset()) + "," + (Integer.valueOf(pacsLexemes.get(i).getOffset() + pacsLexemes.get(i).getLength()) - 1));
+                lemma.setProperty(PushConstants.featureTypeMap.get(pacsLexemes.get(i).getProperty()));
+                lemmaList.add(lemma);
+            }
+        }
+        for (int i = 0; i < lemmaList.size() - 1; i++) {
+            for (int j = i + 1; j < lemmaList.size(); j++) {
+                Triad triad = new Triad();
+                triad.setL_1(lemmaList.get(i));
+                triad.setL_2(lemmaList.get(j));
+                triads.add(triad);
+            }
+        }
+        return triads;
+    }
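The nested loop pairs every kept lemma with every later one, so m medical lemmas yield m*(m-1)/2 candidate triads; a hypothetical pacs sentence illustrates the count:

    // lemmas: [左肺, 下叶, 阴影]  (m = 3)
    // triads: (左肺,下叶) (左肺,阴影) (下叶,阴影)  ->  C(3,2) = 3 pairs
    // the ensemble model then decides which pairs actually hold a relation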
+
+    public void addToSearchDataInputs(List<List<String>> relationExtractionContents, SearchData searchData) throws Exception {
+        StringBuffer sb = null;
+        for (List<String> contents : relationExtractionContents) {
+            sb = new StringBuffer();
+            for (String content : contents) {
+                sb.append(content);
+            }
+            if (isExist(sb.toString())) {
+                Map<String, String> map = new HashMap<>();
+                map.put("featureType", Constants.feature_type_pacs);
+                map.put("featureName", sb.toString());
+                map.put("property", Constants.word_property_PACS_Result);
+                map.put("concept", sb.toString());
+                //always positive ("有")
+                map.put("negative", Constants.default_negative);
+                if (searchData.getInputs().get(map.get("featureName")) == null) {
+                    searchData.getInputs().put(map.get("featureName"), map);
+                }
+            }
+        }
+    }
+
+    /**
+     * Checks whether the relation-extraction output already exists among the known diagnostic criteria
+     * @param content
+     * @return
+     */
+    public boolean isExist(String content){
+        Map<String, Map<String, String>> kl_diagnose_detail_filter_map = ApplicationCacheUtil.getKl_diagnose_detail_filter_map();
+        if (kl_diagnose_detail_filter_map.get("4") != null){
+            if (kl_diagnose_detail_filter_map.get("4").containsKey(content)){
+                return true;
+            }
+        }
+        return false;
+    }
+
+}

+ 7 - 2
graph-web/src/main/java/org/diagbot/graphWeb/work/GraphCalculate.java

@@ -5,7 +5,6 @@ import com.alibaba.fastjson.JSONArray;
 import com.alibaba.fastjson.JSONObject;
 import org.apache.commons.lang3.StringUtils;
 import org.diagbot.common.push.bean.FeatureRate;
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.common.push.bean.ResponseData;
 import org.diagbot.common.push.bean.SearchData;
 import org.diagbot.common.push.bean.neo4j.Filnlly;
@@ -19,7 +18,7 @@ import org.diagbot.graph.jdbc.Neo4jAPI;
 import javax.servlet.http.HttpServletRequest;
 import java.util.*;
 
-import org.diagbot.graphWeb.util.MapValueComparator;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,6 +59,12 @@ public class GraphCalculate {
                 }
             }
         }
+        Map<String, Map<String, String>> inputsMap = searchData.getInputs();
+        if(inputsMap != null && inputsMap.size()>0){
+            if(inputsMap.keySet()!=null && inputsMap.keySet().size()>0){
+                ss.addAll(inputsMap.keySet()) ;
+            }
+        }
         logger.info("从分词系统接收到的词 :" + ss);
         System.out.println("Participle takes: " + (System.currentTimeMillis()-starttime)/1000d + 's');
         List<String> featureTypeList = Arrays.asList(searchData.getFeatureTypes());

+ 5 - 3
graph/src/main/java/org/diagbot/graph/jdbc/Neo4jAPI.java

@@ -957,9 +957,10 @@ public class Neo4jAPI {
                 }
                 //check whether a page-level emergency diagnosis exists
                 List<String> webDiagList = new ArrayList<>();
-                for (String wd:webDiagSplits) {
+               /* for (String wd:webDiagSplits) {
                     webDiagList.add("\'"+wd+"\'");
-                }
+                }*/
+                webDiagList.add("\'"+webDiagSplits[0]+"\'");
                 query =propertiesUtil.getProperty("searchEmergency").replace("disList",webDiagList.toString());
                 result = session.run(query);
                 while (result.hasNext()) {
@@ -972,8 +973,9 @@ public class Neo4jAPI {
                     }
                     if(em == 1){
                         stringStringMap.put("页面急诊", "");
+                        diseaseCondition.put(emDis.replace("\"", ""), stringStringMap);
                     }
-                    diseaseCondition.put(emDis.replace("\"", ""), stringStringMap);
+
                 }
             }
             List<String> newDis = new ArrayList<>();

+ 2 - 2
graph/src/main/resources/bolt.properties

@@ -5,9 +5,9 @@ pass_235 = diagbot@20180822
 
 # neo4j bolt credentials
 #production use
-bolt.uri=bolt://192.168.2.234
+bolt.uri=bolt://192.168.3.180
 bolt.user=neo4j
-bolt.passwd=root
+bolt.passwd=123456
 
 
 #test use

+ 11 - 2
graphdb/src/main/java/org/diagbot/repository/DiseaseRepository.java

@@ -140,12 +140,18 @@ public interface DiseaseRepository extends Neo4jRepository<Disease, Long> {
     //link disease to differential diagnosis
     @Query("match(d:Disease{disId:{0}}),(s:DifferentDis{name:{1}}) merge(d)-[r:鉴别诊断]->(s)")
     void mergeRelationDifferentDis(Long disId,String name);
-    //创建公表名
+    //create the lab-test major item
     @Query("merge(l:LIS{name:{0}})")
     void mergePublicLIS(String lisName);
-    //疾病和鉴别诊断创建关系
+    //link disease to the lab-test major item
     @Query("match(d:Disease{disId:{0}}),(s:LIS{name:{1}}) merge(d)-[r:推荐]->(s)")
     void mergeRelationLIS(Long disId,String name);
+    //create the lab-test public-table item
+    @Query("merge(l:PublicLIS{name:{0}})")
+    void mergePublicLIS_1(String lisName);
+    //link disease to the lab-test public-table item
+    @Query("match(d:Disease{disId:{0}}),(s:PublicLIS{name:{1}}) merge(d)-[r:推荐]->(s)")
+    void mergeRelationLIS_1(Long disId,String name);
     //create the lab-result node
     @Query("merge(l:LISResult{name:{0}})")
     void mergeLISRESULT(String name);
@@ -155,6 +161,9 @@ public interface DiseaseRepository extends Neo4jRepository<Disease, Long> {
     //link the lab result to its codeCondition
     @Query("match(d:Condition{name:{0}}),(s:LISResult{name:{1}}) merge(d)<-[r:诊断依据]-(s)")
     void mergeRelationCondiLisRes(String codeCondition,String lisRes);
+    //link the lab major item to its public-table item
+    @Query("match(d:LIS{name:{0}}),(s:PublicLIS{name:{1}}) merge(d)-[r:化验公表]->(s)")
+    void mergeRelationPublicAndLIS(String lis,String publicLis);
     //create the auxiliary-exam (PACS) item
     @Query("merge(p:PACS{name:{0}})")
     void mergePacs(String pacsName);

+ 45 - 12
graphdb/src/main/java/org/diagbot/service/impl/KnowledgeServiceImpl.java

@@ -396,7 +396,7 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
         List<String> bigdataDiagList = new LinkedList<>();//大数据诊断
         if (bigdataDiagFeature.size() > 0) {
             for (FeatureRate fe : bigdataDiagFeature) {
-                if ("neo4j".equals(fe.getSource()) && fe.getDesc().contains("确诊")) {
+                if ("neo4j".equals(fe.getSource()) && (fe.getDesc().contains("确诊") || fe.getDesc().contains("拟诊"))) {
                     neo4jDiagList.add(fe.getFeatureName());
                 } else if (fe.getDesc() == null) {
                     bigdataDiagList.add(fe.getFeatureName());
@@ -1462,8 +1462,8 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
         Statement st = null;
         ResultSet rs = null;
         try {
-            String type, code ,standard,relation,result,formula,name;
-            String sql = "SELECT diagnose_id,dis_name,`type`,`code`,standard,relation,result,formula FROM `kl_diagnose_detail` where diagnose_id = "+disId+"  and is_deleted = 'N'";
+            String type, code ,standard,relation,unique_name,result,formula,name;
+            String sql = "SELECT diagnose_id,dis_name,`type`,`code`,standard,relation,unique_name,result,formula FROM `kl_diagnose_detail` where diagnose_id = "+disId+"  and is_deleted = 'N'";
             st = connection.createStatement();
             rs = st.executeQuery(sql);
             while (rs.next()){
@@ -1472,6 +1472,7 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                 code = rs.getString("code");
                 standard = rs.getString("standard");
                 relation = rs.getString("relation");
+                unique_name = rs.getString("unique_name");
                 result = rs.getString("result");
                 formula = rs.getString("formula");
                 name = Type.getName(Integer.parseInt(type));
@@ -1480,6 +1481,7 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                 content.put("code",code);
                 content.put("standard",standard);
                 content.put("relation",relation);
+                content.put("unique_name",unique_name);
                 content.put("result",result);
                 content.put("formula",formula);
                 contentList.add(content);
@@ -1703,6 +1705,15 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                 }else if(ni.contains("任七")){
                     path = 7;
                     rel = "任七";
+                }else if(ni.contains("任八")){
+                    path = 8;
+                    rel = "任八";
+                }else if(ni.contains("任九")){
+                    path = 9;
+                    rel = "任九";
+                }else if(ni.contains("任十")){
+                    path = 10;
+                    rel = "任十";
                 }else {
                     path = 1;
                     rel = "任一";
@@ -1787,6 +1798,7 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
             String typeName = row.get("typeName");//Chinese name for the type
             String standard = row.get("standard");//standard term
             String relation = row.get("relation");//related term
+            String unique_name = row.get("unique_name");//public-table item
             String result = row.get("result");//result
             if(StringUtils.isNotEmpty(type)){
                 String[] split =null;
@@ -1954,9 +1966,22 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                         }
                     }
                 } else if("3".equals(type)){ //处理化验
-                    String lis = "";
+                    String lis = "";//大项
+                    if(StringUtils.isNotEmpty(unique_name)){
+                        //create the lab-test public-table item
+                        diseaseRepository.mergePublicLIS_1(unique_name);
+                        //link the disease to the public-table item
+                        diseaseRepository.mergeRelationLIS_1(disId,unique_name);
+                        if(StringUtils.isNotEmpty(result)){
+                            lis = standard;
+                        }else {
+                            lis = unique_name;
+                        }
+                    }
+
                     //look up the standard term for the lab major item
-                    standWord = searchStandWord(standard,type,ciKu);
+                   /* standWord = searchStandWord(standard,type,ciKu);
                     if(StringUtils.isNotEmpty(standWord)){
                         if(StringUtils.isNotEmpty(relation) && standard.equals(relation)){
                             lis = standWord;
@@ -1968,22 +1993,30 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                                 //查找化验小项的标准词
                                 lis = standWord+"--"+searchStandWord(relation,type,ciKu);
                             }
-
                         }
-                    }
+                    }*/
+
 
-                    //创建公表
+                    //create the major item
                     diseaseRepository.mergePublicLIS(lis);
-                    //疾病推荐化验公表
+                    //the disease recommends the lab major item
                     diseaseRepository.mergeRelationLIS(disId,lis);
                     //process the lab results
-                    if(StringUtils.isNotEmpty(result)){
+                    if(StringUtils.isNotEmpty(result) && StringUtils.isNotEmpty(unique_name)){
                         lisResultSplit = result.split("、");
                         if(lisResultSplit != null && lisResultSplit.length>0){
                             //find the standard term for each lab result
                             for (String lisRs:lisResultSplit) {
                                 if(StringUtils.isNotEmpty(lisRs)){
-                                    lisResult = searchStandWord(lisRs,"resultLis",ciKu);
+                                    //create the lab-result node
+                                    diseaseRepository.mergeLISRESULT(lisRs);
+                                    //link the major item to the lab result
+                                    diseaseRepository.mergeRelationPublicLIS(lis,lisRs);
+                                    //link the lab result to its codeCondition
+                                    diseaseRepository.mergeRelationCondiLisRes(disName+code,lisRs);
+                                    //link the lab major item to the public-table item
+                                    diseaseRepository.mergeRelationPublicAndLIS(lis,unique_name);
+                                    /*lisResult = searchStandWord(lisRs,"resultLis",ciKu);
                                     if(StringUtils.isNotEmpty(lisResult)){
                                         //创建化验结果节点
                                         diseaseRepository.mergeLISRESULT(lisResult);
@@ -1991,7 +2024,7 @@ public class    KnowledgeServiceImpl implements KnowledgeService {
                                         diseaseRepository.mergeRelationPublicLIS(lis,lisResult);
                                         //化验结果和对应的codeCondition创建关系
                                         diseaseRepository.mergeRelationCondiLisRes(disName+code,lisResult);
-                                    }
+                                    }*/
 
                                 }
                             }

+ 1 - 1
nlp-web/src/main/java/org/diagbot/nlp/dao/xml/InfoMapper.xml

@@ -113,7 +113,7 @@
 	<select id="selectListWrapper" resultMap="infoWrapperMap" parameterType="java.util.Map">
 		SELECT 
 		<include refid="Base_Column_List"/>
-		FROM doc_info t WHERE 1=1 
+		FROM doc_info t WHERE 1=1
 		<if test="id != null and id != ''">
 				 and t.id = #{id}
 		</if>

+ 1 - 1
nlp-web/src/main/resources/application.yml

@@ -12,7 +12,7 @@ spring:
       charset: UTF-8
       enabled: true
   datasource:       # mybatis 配置,使用druid数据源
-    url: jdbc:mysql://1.1.1.1:3306/med-s?useUnicode=true&characterEncoding=UTF-8
+    url: jdbc:mysql://1.1.1.1:3306/diagbot-app?useUnicode=true&characterEncoding=UTF-8
     username: root
     password: diagbot@20180822
     type: com.alibaba.druid.pool.DruidDataSource

+ 3 - 2
nlp/src/main/java/org/diagbot/nlp/feature/FeatureType.java

@@ -15,7 +15,8 @@ public enum FeatureType {
     TREAT(Constants.feature_type_treat),
     HISTORY(Constants.feature_type_history),
     VITAL_INDEX(Constants.feature_type_vital_index),
-    TIME(Constants.feature_type_time);
+    TIME(Constants.feature_type_time),
+    NONE(Constants.feature_type_default);
 
     FeatureType(String value) {
         this.value = value;
@@ -54,6 +55,6 @@ public enum FeatureType {
             case Constants.feature_type_time:
                 return FeatureType.TIME;
         }
-        return FeatureType.SYMPTOM;
+        return FeatureType.NONE;
     }
 }

+ 17 - 0
nlp/src/main/java/org/diagbot/nlp/rule/analyze/RuleAnalyze.java

@@ -0,0 +1,17 @@
+package org.diagbot.nlp.rule.analyze;
+
+import org.diagbot.nlp.rule.module.PreResult;
+import org.diagbot.nlp.rule.pretreat.Pretreatment;
+import org.diagbot.nlp.rule.pretreat.PretreatmentLis;
+
+import java.util.List;
+
+/**
+ * Created by louhr on 2019/9/25.
+ */
+public class RuleAnalyze {
+    public List<PreResult> lisConvert(String content) throws java.io.IOException {
+        Pretreatment pretreatment = new PretreatmentLis();
+        return pretreatment.analyze(content);
+    }
+}

+ 1 - 1
common-push/src/main/java/org/diagbot/common/push/bean/PreResult.java

@@ -1,4 +1,4 @@
-package org.diagbot.common.push.bean;
+package org.diagbot.nlp.rule.module;
 
 /**
  * Created by louhr on 2019/8/31.

+ 4 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/Pretreatment.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 import org.springframework.util.StringUtils;
@@ -80,4 +80,6 @@ public abstract class Pretreatment {
         }
         return -1;
     }
+
+
 }

+ 2 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentDiag.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 

+ 86 - 0
nlp/src/main/java/org/diagbot/nlp/rule/pretreat/PretreatmentLis.java

@@ -0,0 +1,86 @@
+package org.diagbot.nlp.rule.pretreat;
+
+import org.diagbot.nlp.participle.ParticipleUtil;
+import org.diagbot.nlp.participle.word.Lexeme;
+import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
+import org.diagbot.nlp.util.NegativeEnum;
+import org.diagbot.nlp.util.NlpUtil;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class PretreatmentLis extends Pretreatment {
+    private String join_symbols = ";:;:";
+    protected NegativeEnum[] lis_result = new NegativeEnum[]{NegativeEnum.LIS_RESULT};
+    protected NegativeEnum[] lis_name = new NegativeEnum[]{NegativeEnum.LIS_NAME};
+    protected NegativeEnum[] lis_type = new NegativeEnum[]{NegativeEnum.LIS_TYPE};
+
+    public List<PreResult> analyze(String content) throws java.io.IOException {
+        List<PreResult> preResults = new ArrayList<>();
+        LexemePath<Lexeme> lexemes = ParticipleUtil.participle(content);
+        for (int i = 0; i < lexemes.size(); i++) {
+            Lexeme l = lexemes.get(i);
+            if (NlpUtil.isFeature(l.getProperty(), lis_name)) {
+                cursor = i;
+                PreResult result = new PreResult();
+                result.setDetailName(NlpUtil.concept(l, NegativeEnum.LIS_NAME));
+                result = createPreResult(lexemes, result, i);
+                if (result != null) {
+                    preResults.add(result);
+                }
+            }
+        }
+        return preResults;
+    }
+
+    public PreResult createPreResult(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
+        return null;
+    }
+
+    public PreResult createPreResult(LexemePath<Lexeme> lexemes, PreResult result, int index) {
+        Lexeme leftLexeme;
+        //search backwards for the lab-test package
+        int position = index - 1;
+        while (position > -1) {
+            leftLexeme = lexemes.get(position);
+            if (NlpUtil.isFeature(leftLexeme.getProperty(), lis_type)) {
+                result.setName(NlpUtil.concept(leftLexeme, NegativeEnum.LIS_TYPE));
+                break;
+            }
+            position--;
+        }
+        //search forwards for the lab result
+        int max_find_step = 10;
+        if (index == lexemes.size() - 1) return null;
+        position = index + 1;  // start at the lexeme right after the LIS name ("index++" would mutate index as a side effect)
+        Lexeme rightLexeme;
+        while (position < (index + max_find_step) &&  position < lexemes.size()) {
+            rightLexeme = lexemes.get(position);
+            //on a number, look ahead for a unit
+            if (NlpUtil.isNumberString(rightLexeme)) {
+                result.setValue(rightLexeme.getText());
+                //keep searching for the unit
+                int offset = 1;
+                while (offset < max_find_step && position + offset < lexemes.size()) {
+                    rightLexeme = lexemes.get(position + offset);
+                    if (NlpUtil.isFeature(rightLexeme.getProperty(), nees_time_and_unit)) {
+                        result.setUnits(rightLexeme.getText());
+                        break;
+                    }
+                    offset++;
+                }
+                break;
+            } else if (NlpUtil.isFeature(rightLexeme.getProperty(), lis_result)) {  //lab result
+                result.setValue(rightLexeme.getText());
+                break;
+            }
+            position++;
+        }
+        return result;
+    }
+
+    public String findBodyValue(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
+        return null;
+    }
+}
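A usage sketch, assuming the usual PreResult bean getters and that the participle dictionary tags 血常规 as a lab type, 白细胞 as a lab name, and 10^9/L as a unit (the sample string is illustrative):

    import org.diagbot.nlp.rule.module.PreResult;
    import org.diagbot.nlp.rule.pretreat.PretreatmentLis;

    import java.io.IOException;
    import java.util.List;

    public class PretreatmentLisDemo {
        public static void main(String[] args) throws IOException {
            List<PreResult> results = new PretreatmentLis().analyze("血常规:白细胞12.5×10^9/L");
            for (PreResult r : results) {
                // e.g. name=血常规, detailName=白细胞, value=12.5, units=10^9/L
                System.out.println(r.getName() + "/" + r.getDetailName() + " = " + r.getValue() + " " + r.getUnits());
            }
        }
    }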

+ 2 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentMakeList.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 

+ 2 - 4
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentNormal.java

@@ -1,11 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
-import org.diagbot.nlp.util.NegativeEnum;
-import org.diagbot.nlp.util.NlpUtil;
+import org.diagbot.nlp.rule.module.PreResult;
 
 import java.util.List;
 

+ 2 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentOther.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 

+ 127 - 0
nlp/src/main/java/org/diagbot/nlp/rule/pretreat/PretreatmentPacs.java

@@ -0,0 +1,127 @@
+package org.diagbot.nlp.rule.pretreat;
+
+import org.diagbot.nlp.participle.ParticipleUtil;
+import org.diagbot.nlp.participle.word.Lexeme;
+import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
+import org.diagbot.nlp.util.NegativeEnum;
+import org.diagbot.nlp.util.NlpUtil;
+
+import java.util.List;
+
+public class PretreatmentPacs extends Pretreatment {
+    protected NegativeEnum[] nees_pacs_result = new NegativeEnum[] { NegativeEnum.PACS_RESULT };
+    protected NegativeEnum[] nees_pacs_name = new NegativeEnum[] { NegativeEnum.PACS_NAME };
+    private String join_symbols = ";:;:";
+
+    public List<PreResult> analyze(String content) throws java.io.IOException {
+        List<PreResult> preResultList = super.analyzeDefault(content);
+        // besides numeric conversion, some PACS findings are extracted for crisis alerts
+        LexemePath<Lexeme> lexemes = ParticipleUtil.participle(content);
+
+        Lexeme leftLexeme;
+        for (int i = 0; i < lexemes.size(); i++) {
+            Lexeme l = lexemes.get(i);
+            if (NlpUtil.isFeature(l.getProperty(), nees_pacs_result) && i > 0) {
+                int c = i - 1;
+                while (c > -1) {
+                    leftLexeme = lexemes.get(c);
+                    if (NlpUtil.isFeature(leftLexeme.getProperty(), nees_pacs_name)) {
+                        PreResult result = new PreResult();
+                        result.setValue(NlpUtil.concept(l, NegativeEnum.PACS_RESULT));
+                        result.setDetailName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
+                        result.setUniqueName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
+                        preResultList.add(result);
+                        break;
+                    }
+                    c--;
+                }
+            } else if (NlpUtil.isFeature(l.getProperty(), nees_time_and_unit) && i > 0) {
+                PreResult result = data2Object(lexemes, l, i, l.getProperty());
+                if (result != null) {
+                    preResultList.add(result);
+                }
+            }
+        }
+        return preResultList;
+    }
+
+    public PreResult createPreResult(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
+        double value = findNumberValue(lexemes, lexeme, index);
+        if (value == -1) {
+            return null;
+        }
+//        // keep searching backwards for the body entity
+//        String text = findBodyValue(lexemes, lexeme, index);
+//        if (StringUtils.isEmpty(text)) {
+//            return null;
+//        }
+        PreResult result = new PreResult();
+        result.setValue(String.valueOf(value));
+        result.setUnits(lexeme.getText());
+        return getPreResultPub(lexemes, result);
+    }
+
+    public String findBodyValue(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
+        return null;
+    }
+
+    public PreResult getPreResultPub(LexemePath<Lexeme> lexemes, PreResult result) {
+        // keep searching backwards for the PACS detail item
+        if (cursor > 0) {
+            cursor--;
+        }
+        Lexeme leftLexeme = lexemes.get(cursor);
+        if (join_symbols.contains(leftLexeme.getText())) {
+            if (cursor > 0) {
+                cursor--;
+                leftLexeme = lexemes.get(cursor);
+            } else {
+                return null;
+            }
+        }
+        if (NlpUtil.isFeature(leftLexeme.getProperty(), new NegativeEnum[] { NegativeEnum.PACS_NAME })) {
+            // a PACS name lexeme supplies both the detail name and the unique name
+            result.setDetailName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
+            result.setUniqueName(NlpUtil.concept(leftLexeme, NegativeEnum.PACS_NAME));
+        } else {
+            return null;
+        }
+        return result;
+    }
+
+    protected double findNumberValue(LexemePath<Lexeme> lexemes, Lexeme lexeme, int index) {
+        if (index < 1) {
+            return -1;
+        }
+        cursor = index - 1;
+        Lexeme leftLexeme = lexemes.get(cursor);
+        if (isNumberString(leftLexeme)) {
+            String[] numbersSplit = leftLexeme.getText().split("\\*");
+            try {
+                if (numbersSplit.length == 2) {
+                    return Double.valueOf(numbersSplit[0]) * Double.valueOf(numbersSplit[1]);
+                } else if (numbersSplit.length == 3) {
+                    return Double.valueOf(numbersSplit[0]) * Double.valueOf(numbersSplit[1])
+                            * Double.valueOf(numbersSplit[2]);
+                } else {
+                    return -1;
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+        return -1;
+    }
+
+    public static boolean isNumberString(Lexeme l) {
+        if (l == null) {
+            return false;
+        }
+        return NlpUtil.isFeature(l.getProperty(), new NegativeEnum[] { NegativeEnum.DIGITS })
+                && l.getText().indexOf("*") != -1;
+    }
+}
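
The numeric handling in findNumberValue boils down to a dimension product over a "*"-separated size string. A standalone sketch of just that rule (class and method names are ours):

    public class SizeProductSketch {
        // mirrors findNumberValue: DIGITS lexemes containing '*' such as
        // "3*4" or "3*4*5" become dimension products; anything else yields -1
        static double sizeProduct(String text) {
            String[] parts = text.split("\\*");
            try {
                if (parts.length == 2) {
                    return Double.parseDouble(parts[0]) * Double.parseDouble(parts[1]);
                }
                if (parts.length == 3) {
                    return Double.parseDouble(parts[0]) * Double.parseDouble(parts[1])
                            * Double.parseDouble(parts[2]);
                }
            } catch (NumberFormatException e) {
                // malformed number: fall through to -1
            }
            return -1;
        }

        public static void main(String[] args) {
            System.out.println(sizeProduct("3*4*5")); // 60.0, e.g. a 3*4*5cm finding in PACS text
        }
    }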

+ 2 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentSymptom.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 

+ 2 - 2
common-push/src/main/java/org/diagbot/common/push/filter/pretreat/PretreatmentVital.java

@@ -1,9 +1,9 @@
-package org.diagbot.common.push.filter.pretreat;
+package org.diagbot.nlp.rule.pretreat;
 
-import org.diagbot.common.push.bean.PreResult;
 import org.diagbot.nlp.participle.ParticipleUtil;
 import org.diagbot.nlp.participle.word.Lexeme;
 import org.diagbot.nlp.participle.word.LexemePath;
+import org.diagbot.nlp.rule.module.PreResult;
 import org.diagbot.nlp.util.NegativeEnum;
 import org.diagbot.nlp.util.NlpUtil;
 

+ 1 - 0
nlp/src/main/java/org/diagbot/nlp/util/Constants.java

@@ -18,6 +18,7 @@ public class Constants {
     public final static String feature_type_feature = "9"; // feature details inside a symptom description, e.g. body part, nature
     public final static String feature_type_time = "10";    // extracted time
     public final static String feature_type_vital_index = "42"; // vital sign indicator
+    public final static String feature_type_default = "-1"; // default return type
 
     public static NegativeEnum[] symptom_type = new NegativeEnum[]{NegativeEnum.SYMPTOM, NegativeEnum.SYMPTOM_INDEX, NegativeEnum.SYMPTOM_PERFORMANCE};
     public static NegativeEnum[] unit_time_type = new NegativeEnum[]{NegativeEnum.EVENT_TIME, NegativeEnum.UNIT};

+ 36 - 20
push-web/src/main/java/org/diagbot/push/controller/AlgorithmController.java

@@ -14,6 +14,9 @@ import org.diagbot.common.push.filter.rule.PretreatmentRule;
 import org.diagbot.common.push.work.ParamsDataProxy;
 import org.diagbot.graphWeb.work.GraphCalculate;
 import org.diagbot.graphWeb.work.LisPacsCalculate;
+import org.diagbot.nlp.rule.analyze.RuleAnalyze;
+import org.diagbot.nlp.rule.pretreat.Pretreatment;
+import org.diagbot.nlp.rule.pretreat.PretreatmentLis;
 import org.diagbot.nlp.util.Constants;
 import org.diagbot.nlp.util.NlpCache;
 import org.diagbot.pub.api.Response;
@@ -22,6 +25,7 @@ import org.diagbot.pub.utils.PropertiesUtil;
 import org.diagbot.pub.utils.http.HttpApi;
 import org.diagbot.pub.web.BaseController;
 import org.diagbot.rule.crisis.CrisisApplication;
+import org.diagbot.rule.lis.LisApplication;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.stereotype.Controller;
@@ -49,7 +53,7 @@ public class AlgorithmController extends BaseController {
         listView = "/pages/algorithm/list.html";
     }
 
-    @RequestMapping({"/index"})
+    @RequestMapping({ "/index" })
     public String index() {
         return listView;
     }
@@ -72,7 +76,7 @@ public class AlgorithmController extends BaseController {
         Response<ResponseData> response = new Response<>();
 
         MysqlJdbc nlpJdbc = new MysqlJdbc("root", "diagbot@20180822", "jdbc:mysql://192.168.2.235:3306/med-s?useUnicode=true&characterEncoding=UTF-8");
-        List<Map<String, String>> data = nlpJdbc.query("doc_info_validate", new String[]{"pk_dcpv", "present", "diag", "sex", "age"}, "");
+        List<Map<String, String>> data = nlpJdbc.query("doc_info_validate", new String[] { "pk_dcpv", "present", "diag", "sex", "age" }, "");
 
         Map<String, String> diags = NlpCache.getStandard_info_synonym_map().get(Constants.word_property_diagnose);
 
@@ -122,6 +126,14 @@ public class AlgorithmController extends BaseController {
     public Response<ResponseData> combine(HttpServletRequest request, SearchData searchData) throws Exception {
         ResponseData responseData = new ResponseData();
         logger.info("开始推送服务......");
+        LisApplication lisApplication = new LisApplication();
+        if ((searchData.getLisArr() == null || searchData.getLisArr().size() == 0)
+                && StringUtils.isNotEmpty(searchData.getLis())) {
+            Pretreatment pretreatment = new PretreatmentLis();
+            searchData.setLisArr(pretreatment.analyze(searchData.getLis()));
+        }
+        lisApplication.lisConvertToInputs(searchData.getLisArr(), searchData);
+
         Response<ResponseData> response = new Response();
         // basic preprocessing, e.g. age and gender
         ParamsDataProxy paramsDataProxy = new ParamsDataProxy();
@@ -170,11 +182,13 @@ public class AlgorithmController extends BaseController {
         Map<String, List<CrisisDetail>> crisisDetails = responseData.getCrisisDetails();
         List<MedicalIndication> crisisDetailsList = this.getCrisisDetails(crisisDetails);
         List<MedicalIndication> medicalIndications = graphResponseData.getMedicalIndications();
-        if (medicalIndications == null) medicalIndications = new ArrayList<>();
+        if (medicalIndications == null) {
+            medicalIndications = new ArrayList<>();
+        }
         medicalIndications.addAll(crisisDetailsList);
         if (medicalIndications != null && medicalIndications.size() > 0) {
             logger.info("指标推送!!!!!!!!!");
-//           bigDataResponseData.getMedicalIndications().addAll(medicalIndications);
+            //           bigDataResponseData.getMedicalIndications().addAll(medicalIndications);
             bigDataResponseData.setMedicalIndications(medicalIndications);
         }
 
@@ -191,11 +205,11 @@ public class AlgorithmController extends BaseController {
         Map<String, String> vitalCache = CacheUtil.getVitalCache();
         List<String> featureList = Arrays.asList(searchData.getFeatureTypes());
         List<FeatureRate> vitals = graphResponseData.getVitals();
-        if(featureList.contains(Constants.feature_type_vital_index) && this.getVital(vitalCache,vitals).size() > 0){
-            bigDataResponseData.setVitals(this.getVital(vitalCache,vitals));
+        if (featureList.contains(Constants.feature_type_vital_index) && this.getVital(vitalCache, vitals).size() > 0) {
+            bigDataResponseData.setVitals(this.getVital(vitalCache, vitals));
 
         }
-        if(featureList.contains(Constants.feature_type_vital) && vitals.size() > 0){
+        if (featureList.contains(Constants.feature_type_vital) && vitals.size() > 0) {
             bigDataResponseData.setVitals(vitals);
         }
 
@@ -211,23 +225,24 @@ public class AlgorithmController extends BaseController {
 
     /**
      * wraps crisis values for output
+     *
      * @param crisisDetails
      * @return
      */
-    public List<MedicalIndication> getCrisisDetails(Map<String, List<CrisisDetail>> crisisDetails){
+    public List<MedicalIndication> getCrisisDetails(Map<String, List<CrisisDetail>> crisisDetails) {
         List<MedicalIndication> crisisDetailList = new ArrayList<>();
-        for(Map.Entry<String, List<CrisisDetail>> entry : crisisDetails.entrySet()) {
-            for (CrisisDetail crisisDetail:entry.getValue()) {
+        for (Map.Entry<String, List<CrisisDetail>> entry : crisisDetails.entrySet()) {
+            for (CrisisDetail crisisDetail : entry.getValue()) {
                 String originText = crisisDetail.getOriginText();// raw text input
                 String remindText = crisisDetail.getRemindText();// crisis alert indicator
                 String standardText = crisisDetail.getStandardText();// evaluation standard
-                if(StringUtils.isNotEmpty(remindText)){
+                if (StringUtils.isNotEmpty(remindText)) {
                     MedicalIndication medicalIndication = new MedicalIndication();
                     List<MedicalIndicationDetail> crisisMid = new ArrayList<>();
                     MedicalIndicationDetail medicalIndicationDetail = new MedicalIndicationDetail();
                     JSONObject jsonObject = new JSONObject();
-                    jsonObject.put("controlType",2);
-                    jsonObject.put("name",standardText);
+                    jsonObject.put("controlType", 2);
+                    jsonObject.put("name", standardText);
                     medicalIndicationDetail.setType(4);
                     medicalIndicationDetail.setContent(jsonObject);
                     crisisMid.add(medicalIndicationDetail);
@@ -240,26 +255,27 @@ public class AlgorithmController extends BaseController {
         return crisisDetailList;
     }
 
-    public List<FeatureRate> getVital(Map<String, String> vitalCache,List<FeatureRate> vitals){
+    public List<FeatureRate> getVital(Map<String, String> vitalCache, List<FeatureRate> vitals) {
         Set<String> vitalSet = new LinkedHashSet<>();
         List<FeatureRate> vitalList = new ArrayList<>();
-        if(vitals != null){
-            for (FeatureRate f:vitals) {
+        if (vitals != null) {
+            for (FeatureRate f : vitals) {
                 String s = vitalCache.get(f.getFeatureName());
-                if(StringUtils.isNotEmpty(s)){
+                if (StringUtils.isNotEmpty(s)) {
                     vitalSet.add(s);
                 }
             }
         }
-        if(vitalSet != null){
-            for (String vi:vitalSet) {
-                FeatureRate featureRate= new FeatureRate();
+        if (vitalSet != null) {
+            for (String vi : vitalSet) {
+                FeatureRate featureRate = new FeatureRate();
                 featureRate.setFeatureName(vi);
                 vitalList.add(featureRate);
             }
         }
         return vitalList;
     }
+
     public Response<ResponseData> algorithm(HttpServletRequest request, SearchData searchData) throws Exception {
         Response<ResponseData> response = new Response();
 

+ 1 - 0
push-web/src/main/java/org/diagbot/push/controller/CacheFileManagerController.java

@@ -45,6 +45,7 @@ public class CacheFileManagerController extends BaseController {
         ApplicationCacheUtil.createDoc_result_mapping_filter_map();
         ApplicationCacheUtil.create_kl_rule_filter_map();
         ApplicationCacheUtil.create_kl_rule_app_filter_map();
+        ApplicationCacheUtil.create_kl_diagnose_detail_filter_map();
         return response;
     }
 }

+ 21 - 0
rule/src/main/java/org/diagbot/rule/crisis/CrisisApplication.java

@@ -77,4 +77,25 @@ public class CrisisApplication {
         }
         return crisisMap;
     }
+
+    private String mergeStandardText(Rule rule) {
+        // an exact (eq) value is matched with top priority
+        StringBuffer sb = new StringBuffer();
+        if (org.apache.commons.lang3.StringUtils.isNotEmpty(rule.getEq_value())) {
+            sb.append(rule.getPub_name()).append(rule.getEq_operator()).append(rule.getEq_value());
+        } else if (org.apache.commons.lang3.StringUtils.isNotEmpty(rule.getMax_value()) && org.apache.commons.lang3.StringUtils.isNotEmpty(rule.getMin_value())) {
+            sb.append(rule.getPub_name()).append(rule.getMin_operator()).append(rule.getMin_value())
+                    .append(rule.getMin_unit()).append(";")
+                    .append(rule.getPub_name()).append(rule.getMax_operator()).append(rule.getMax_value())
+                    .append(rule.getMax_unit());
+        } else if (org.apache.commons.lang3.StringUtils.isNotEmpty(rule.getMin_value())) {
+            sb.append(rule.getPub_name()).append(rule.getMin_operator()).append(rule.getMin_value())
+                    .append(rule.getMin_unit());
+        } else if (org.apache.commons.lang3.StringUtils.isNotEmpty(rule.getMax_value())) {
+            sb.append(rule.getPub_name()).append(rule.getMax_operator()).append(rule.getMax_value())
+                    .append(rule.getMax_unit());
+        }
+        return sb.toString();
+    }
+
 }
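
The branch order above means an eq value wins over ranges, and a two-sided range is joined with ";". A self-contained sketch of the resulting strings (all field values hypothetical):

    public class MergeStandardTextSketch {
        public static void main(String[] args) {
            // eq set:           血钾=5.5
            // min and max set:  血钾<3.5mmol/L;血钾>5.5mmol/L
            // only min set:     血钾<3.5mmol/L
            // only max set:     血钾>5.5mmol/L
            StringBuffer sb = new StringBuffer();
            sb.append("血钾").append("<").append("3.5").append("mmol/L").append(";")
              .append("血钾").append(">").append("5.5").append("mmol/L");
            System.out.println(sb); // 血钾<3.5mmol/L;血钾>5.5mmol/L
        }
    }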

+ 97 - 0
rule/src/main/java/org/diagbot/rule/lis/LisApplication.java

@@ -0,0 +1,97 @@
+package org.diagbot.rule.lis;
+
+import org.diagbot.common.push.bean.SearchData;
+import org.diagbot.nlp.rule.module.PreResult;
+import org.diagbot.nlp.util.Constants;
+import org.springframework.util.StringUtils;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @Description:
+ * @Author: HUJING
+ * @Date: 2019/9/25 13:33
+ */
+public class LisApplication {
+    private static String up = "升高";
+    private static String down = "降低";
+    private static String normal = "正常";
+
+    public void lisConvertToInputs(List<PreResult> preResultList, SearchData searchData) {
+        if (preResultList == null) {
+            return;
+        }
+        for (PreResult result : preResultList) {
+            String convertValue = "";
+            if (!StringUtils.isEmpty(result.getUniqueName())) {
+                // numeric values are converted to "normal"/"up"/"down" feature text
+                if (!StringUtils.isEmpty(result.getValue()) && isNumber(result.getValue())) {
+                    if (!StringUtils.isEmpty(result.getMaxValue()) && !StringUtils.isEmpty(result.getMinValue())
+                            && isNormal(result.getValue(), result.getMaxValue(), result.getMinValue())) {
+                        convertValue = result.getUniqueName() + normal;
+                    } else if (!StringUtils.isEmpty(result.getMaxValue())
+                            && compareMax(result.getValue(), result.getMaxValue())) {
+                        convertValue = result.getUniqueName() + up;
+                    } else if (!StringUtils.isEmpty(result.getMinValue())
+                            && compareMin(result.getValue(), result.getMinValue())) {
+                        convertValue = result.getUniqueName() + down;
+                    }
+                } else if (!StringUtils.isEmpty(result.getOtherValue())) {
+                    // textual results are concatenated directly onto the unique name
+                    convertValue = result.getUniqueName() + result.getOtherValue();
+                }
+                if (StringUtils.isEmpty(convertValue)) continue;
+                Map<String, String> map = new HashMap<>();
+                map.put("featureType", Constants.feature_type_lis);
+                map.put("featureName", convertValue);
+                map.put("property", Constants.word_property_LIS_Result);
+                map.put("concept", convertValue);
+                // every LIS feature is marked as present
+                map.put("negative", Constants.default_negative);
+                if (searchData.getInputs().get(map.get("featureName")) == null) {
+                    searchData.getInputs().put(map.get("featureName"), map);
+                }
+            }
+        }
+    }
+
+
+    private boolean isNormal(String value, String maxValue, String minValue) {
+        try {
+            return Double.valueOf(value) < Double.valueOf(maxValue) && Double.valueOf(value) > Double.valueOf(minValue);
+        } catch (Exception e) {
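+            // non-numeric value or bounds: treat as not normal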
+        }
+        return false;
+    }
+
+    private boolean compareMin(String value, String minValue) {
+        try {
+            return Double.valueOf(value) < Double.valueOf(minValue);
+        } catch (Exception e) {
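+            // non-numeric input: treat as not below the minimum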
+        }
+        return false;
+    }
+
+    private boolean compareMax(String value, String maxValue) {
+        try {
+            return Double.valueOf(value) > Double.valueOf(maxValue);
+        } catch (Exception e) {
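+            // non-numeric input: treat as not above the maximum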
+        }
+        return false;
+    }
+
+    public static boolean isNumber(String value) {
+        if (StringUtils.isEmpty(value)) {
+            return false;
+        }
+        try {
+            // the value is later compared as a double, so require the whole string to parse
+            Double.parseDouble(value);
+            return true;
+        } catch (NumberFormatException e) {
+            return false;
+        }
+    }
+}
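
A usage sketch for the converter (setMinValue/setMaxValue are assumed to mirror the getters used above, and SearchData is assumed to initialize its inputs map; neither is shown in this commit):

    import org.diagbot.common.push.bean.SearchData;
    import org.diagbot.nlp.rule.module.PreResult;
    import org.diagbot.rule.lis.LisApplication;
    import java.util.Collections;

    public class LisApplicationSketch {
        public static void main(String[] args) {
            PreResult hb = new PreResult();
            hb.setUniqueName("血红蛋白");
            hb.setValue("90");
            hb.setMinValue("115");   // assumed setter; 90 < 115 -> 降低
            hb.setMaxValue("150");   // assumed setter

            SearchData searchData = new SearchData();
            new LisApplication().lisConvertToInputs(Collections.singletonList(hb), searchData);
            // inputs now hold the feature "血红蛋白降低" with featureType lis
            System.out.println(searchData.getInputs().keySet());
        }
    }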