
1. Add a backend endpoint to refresh caches

louhr committed 5 years ago
commit 4e7688e753

+ 120 - 101
bigdata-web/src/main/java/org/diagbot/bigdata/common/ApplicationCacheUtil.java

@@ -52,6 +52,10 @@ public class ApplicationCacheUtil {
         return standard_info_type_tree_map;
     }
 
+    /**
+     * No longer used.
+     * @return
+     */
     public static Map<String, String> getDoc_result_mapping_vital_map() {
         if (doc_result_mapping_vital_map == null) {
             Configuration configuration = new DefaultConfig();
@@ -62,131 +66,146 @@ public class ApplicationCacheUtil {
 
     public static Map<String, String> getDoc_result_mapping_diag_map() {
         if (doc_result_mapping_diag_map == null) {
-            Configuration configuration = new DefaultConfig();
-            doc_result_mapping_diag_map = configuration.loadMapDict("doc_result_mapping_diag.dict");
+            createDoc_result_mapping_diag_map();
         }
         return doc_result_mapping_diag_map;
     }
 
-    public static Map<String, Map<String, ResultMappingFilter>> getDoc_result_mapping_filter_map() {
+    public static Map<String, String> createDoc_result_mapping_diag_map() {
         Configuration configuration = new DefaultConfig();
+        doc_result_mapping_diag_map = configuration.loadMapDict("bigdata_diag_2_dept.dict");
+        return doc_result_mapping_diag_map;
+    }
+
+    public static Map<String, Map<String, ResultMappingFilter>> getDoc_result_mapping_filter_map() {
         if (doc_result_mapping_filter_map == null) {
-            List<String> fileContents = configuration.readFileContents("doc_result_mapping_filter.dict");
-            String[] line_string;
-            List<ResultMappingFilter> resultMappingFilters = new ArrayList<>();
-            try {
-                for (int i = 0; i < fileContents.size(); i++) {
-                    line_string = org.apache.commons.lang3.StringUtils.split(fileContents.get(i), "\\|");
-                    if (line_string.length == 5) {
-                        ResultMappingFilter resultMappingFilter = new ResultMappingFilter();
-                        resultMappingFilter.setFeatureName(line_string[0]);
-                        resultMappingFilter.setFeatureType(line_string[1]);
-                        resultMappingFilter.setSex(line_string[2]);
-                        resultMappingFilter.setAgeStart(Integer.parseInt(line_string[3]));
-                        resultMappingFilter.setAgeEnd(Integer.parseInt(line_string[4]));
-                        resultMappingFilters.add(resultMappingFilter);
-                    }
+            createDoc_result_mapping_filter_map();
+        }
+        return doc_result_mapping_filter_map;
+    }
+
+    public static Map<String, Map<String, ResultMappingFilter>> createDoc_result_mapping_filter_map() {
+        Configuration configuration = new DefaultConfig();
+        List<String> fileContents = configuration.readFileContents("bigdata_sex_age_filter.dict");
+        String[] line_string;
+        List<ResultMappingFilter> resultMappingFilters = new ArrayList<>();
+        try {
+            for (int i = 0; i < fileContents.size(); i++) {
+                line_string = org.apache.commons.lang3.StringUtils.split(fileContents.get(i), "\\|");
+                if (line_string.length == 5) {
+                    ResultMappingFilter resultMappingFilter = new ResultMappingFilter();
+                    resultMappingFilter.setFeatureName(line_string[0]);
+                    resultMappingFilter.setFeatureType(line_string[1]);
+                    resultMappingFilter.setSex(line_string[2]);
+                    resultMappingFilter.setAgeStart(Integer.parseInt(line_string[3]));
+                    resultMappingFilter.setAgeEnd(Integer.parseInt(line_string[4]));
+                    resultMappingFilters.add(resultMappingFilter);
                 }
-            } catch (Exception e) {
-                e.printStackTrace();
             }
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
 
-            doc_result_mapping_filter_map = new HashMap<>();
-            Map<String, ResultMappingFilter> filterMap = null;
-            for (ResultMappingFilter resultMappingFilter : resultMappingFilters) {
-                filterMap = doc_result_mapping_filter_map.get(resultMappingFilter.getFeatureType());
-                if (filterMap == null) {
-                    filterMap = new HashMap<>();
-                }
-                filterMap.put(resultMappingFilter.getFeatureName(), resultMappingFilter);
-                doc_result_mapping_filter_map.put(resultMappingFilter.getFeatureType(), filterMap);
+        doc_result_mapping_filter_map = new HashMap<>();
+        Map<String, ResultMappingFilter> filterMap = null;
+        for (ResultMappingFilter resultMappingFilter : resultMappingFilters) {
+            filterMap = doc_result_mapping_filter_map.get(resultMappingFilter.getFeatureType());
+            if (filterMap == null) {
+                filterMap = new HashMap<>();
             }
+            filterMap.put(resultMappingFilter.getFeatureName(), resultMappingFilter);
+            doc_result_mapping_filter_map.put(resultMappingFilter.getFeatureType(), filterMap);
         }
         return doc_result_mapping_filter_map;
     }
 
     public static Map<String, List<Map<String, String>>> getKl_result_mapping_standword_map() {
         if (kl_result_mapping_standword_map == null) {
-            kl_result_mapping_standword_map = new HashMap<>();
-            Configuration configuration = new DefaultConfig();
-            List<String> fileContents = configuration.readFileContents("kl_result_mapping_standword.dict");
-            List<Map<String, String>> standWordObjValList = null;
-            Map<String, String> standWordObjVal = null;
-            String operation = ">=|≥|>|大于|>|超过|<=|≤|<|小于|<|少于";
-            try {
-                for (String fileContent : fileContents) {
-                    LexemePath<Lexeme> lexemes = null;
-                    String op = "";
-                    String[] fileContentSplit = null;
-                    //Split each standard term on its comparison operator, otherwise segmentation would still yield the original term
-                    if (fileContent.contains(">") || fileContent.contains("大于")
-                            || fileContent.contains(">") || fileContent.contains("超过")) {
-                        op = ">";
-                        fileContentSplit = fileContent.split(operation);
-                    } else if (fileContent.contains("<") || fileContent.contains("小于")
-                            || fileContent.contains("<") || fileContent.contains("少于")) {
-                        op = "<";
-                        fileContentSplit = fileContent.split(operation);
-                    } else if (fileContent.contains(">=") || fileContent.contains("≥")){
-                        op = ">=";
-                        fileContentSplit = fileContent.split(operation);
-                    } else if (fileContent.contains("<=") || fileContent.contains("≤")) {
-                        op = "<=";
-                        fileContentSplit = fileContent.split(operation);
-                    } else {
-                        continue;
-                    }
-                    LexemePath<Lexeme> lexemeWord = null;
-                    //Segment each piece of the split standard term
-                    for (String fileContentWords : fileContentSplit) {
-                        lexemeWord = ParticipleUtil.participle(fileContentWords);
-                        if (lexemeWord != null) {
-                            if (null == lexemes) {
-                                lexemes = lexemeWord;
-                            } else {
-                                for (Lexeme lexeme : lexemeWord) {
-                                    lexemes.add(lexeme);
-                                }
+            createKl_result_mapping_standword_map();
+        }
+        return kl_result_mapping_standword_map;
+    }
+
+    public static Map<String, List<Map<String, String>>> createKl_result_mapping_standword_map() {
+        kl_result_mapping_standword_map = new HashMap<>();
+        Configuration configuration = new DefaultConfig();
+        List<String> fileContents = configuration.readFileContents("kl_result_mapping_standword.dict");
+        List<Map<String, String>> standWordObjValList = null;
+        Map<String, String> standWordObjVal = null;
+        String operation = ">=|≥|>|大于|>|超过|<=|≤|<|小于|<|少于";
+        try {
+            for (String fileContent : fileContents) {
+                LexemePath<Lexeme> lexemes = null;
+                String op = "";
+                String[] fileContentSplit = null;
+                // Split each standard term on its comparison operator, otherwise segmentation would still yield the original term; two-character operators are checked first, since a string containing ">=" also contains ">".
+                if (fileContent.contains(">") || fileContent.contains("大于")
+                        || fileContent.contains(">") || fileContent.contains("超过")) {
+                    op = ">";
+                    fileContentSplit = fileContent.split(operation);
+                } else if (fileContent.contains("<") || fileContent.contains("小于")
+                        || fileContent.contains("<") || fileContent.contains("少于")) {
+                    op = "<";
+                    fileContentSplit = fileContent.split(operation);
+                } else if (fileContent.contains(">=") || fileContent.contains("≥")){
+                    op = ">=";
+                    fileContentSplit = fileContent.split(operation);
+                } else if (fileContent.contains("<=") || fileContent.contains("≤")) {
+                    op = "<=";
+                    fileContentSplit = fileContent.split(operation);
+                } else {
+                    continue;
+                }
+                LexemePath<Lexeme> lexemeWord = null;
+                //Segment each piece of the split standard term
+                for (String fileContentWords : fileContentSplit) {
+                    lexemeWord = ParticipleUtil.participle(fileContentWords);
+                    if (lexemeWord != null) {
+                        if (null == lexemes) {
+                            lexemes = lexemeWord;
+                        } else {
+                            for (Lexeme lexeme : lexemeWord) {
+                                lexemes.add(lexeme);
                             }
                         }
                     }
-                    String standWordObjKey = "";
-                    standWordObjValList = new ArrayList<>();
-                    standWordObjVal = new HashMap<>();
-                    int i = 0;
-                    for (Lexeme lexeme : lexemes) {
-                        i++;
-                        if (lexeme.getProperty().contains(",")) {
-                            setProterty(lexeme); //if segmentation yields multiple POS tags, keep only one (for now only symptoms and vital signs are handled)
-                        }
-                        NegativeEnum lexemeNegativeEnum = NegativeEnum.parseOfValue(lexeme.getProperty());
-                        if (lexemeNegativeEnum == NegativeEnum.SYMPTOM || lexemeNegativeEnum == NegativeEnum.CAUSE
-                                || lexemeNegativeEnum == NegativeEnum.VITAL_INDEX) {
-                            if (!kl_result_mapping_standword_map.containsKey(lexeme.getText())) {
-                                kl_result_mapping_standword_map.put(lexeme.getText(), standWordObjValList);
-                            } else {
-                                standWordObjKey = lexeme.getText();
-                            }
-                        } else if (lexemeNegativeEnum == NegativeEnum.DIGITS) {
-                            standWordObjVal.put("value", lexeme.getText());
-                        } else if (lexemeNegativeEnum == NegativeEnum.UNIT
-                                || lexemeNegativeEnum == NegativeEnum.EVENT_TIME) {
-                            standWordObjVal.put("unit", lexeme.getText().toLowerCase());
+                }
+                String standWordObjKey = "";
+                standWordObjValList = new ArrayList<>();
+                standWordObjVal = new HashMap<>();
+                int i = 0;
+                for (Lexeme lexeme : lexemes) {
+                    i++;
+                    if (lexeme.getProperty().contains(",")) {
+                        setProterty(lexeme); //if segmentation yields multiple POS tags, keep only one (for now only symptoms and vital signs are handled)
+                    }
+                    NegativeEnum lexemeNegativeEnum = NegativeEnum.parseOfValue(lexeme.getProperty());
+                    if (lexemeNegativeEnum == NegativeEnum.SYMPTOM || lexemeNegativeEnum == NegativeEnum.CAUSE
+                            || lexemeNegativeEnum == NegativeEnum.VITAL_INDEX) {
+                        if (!kl_result_mapping_standword_map.containsKey(lexeme.getText())) {
+                            kl_result_mapping_standword_map.put(lexeme.getText(), standWordObjValList);
+                        } else {
+                            standWordObjKey = lexeme.getText();
                         }
-                        if (lexemes.size() == i) {
-                            standWordObjVal.put("op", op);
-                            standWordObjVal.put("standword", fileContent);
-                            if (kl_result_mapping_standword_map.containsKey(standWordObjKey)) {
-                                kl_result_mapping_standword_map.get(standWordObjKey).add(standWordObjVal);
-                            } else {
-                                standWordObjValList.add(standWordObjVal);
-                            }
+                    } else if (lexemeNegativeEnum == NegativeEnum.DIGITS) {
+                        standWordObjVal.put("value", lexeme.getText());
+                    } else if (lexemeNegativeEnum == NegativeEnum.UNIT
+                            || lexemeNegativeEnum == NegativeEnum.EVENT_TIME) {
+                        standWordObjVal.put("unit", lexeme.getText().toLowerCase());
+                    }
+                    if (lexemes.size() == i) {
+                        standWordObjVal.put("op", op);
+                        standWordObjVal.put("standword", fileContent);
+                        if (kl_result_mapping_standword_map.containsKey(standWordObjKey)) {
+                            kl_result_mapping_standword_map.get(standWordObjKey).add(standWordObjVal);
+                        } else {
+                            standWordObjValList.add(standWordObjVal);
                         }
                     }
                 }
-            } catch (Exception e) {
-                e.printStackTrace();
             }
+        } catch (Exception e) {
+            e.printStackTrace();
         }
         return kl_result_mapping_standword_map;
     }
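
A caveat on the getXxx()/createXxx() split introduced above: the create methods populate the shared static field in place (createKl_result_mapping_standword_map(), for example, assigns kl_result_mapping_standword_map = new HashMap<>() and then fills it), so a caller that hits the getter mid-refresh can observe a partially built cache. A minimal build-then-swap sketch, with hypothetical names, that publishes the map only once it is complete:

    import java.util.HashMap;
    import java.util.Map;

    public class CacheHolder {
        // volatile so readers always see the most recently published map
        private static volatile Map<String, String> cache;

        public static Map<String, String> get() {
            if (cache == null) {
                rebuild();
            }
            return cache;
        }

        // synchronized so two refreshes cannot interleave their loads
        public static synchronized Map<String, String> rebuild() {
            Map<String, String> fresh = new HashMap<>();
            // ... load dict contents into 'fresh' here ...
            cache = fresh; // publish only after the map is fully built
            return cache;
        }
    }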

+ 5 - 5
common-push/src/main/java/org/diagbot/common/push/cache/CacheFileManager.java

@@ -192,7 +192,7 @@ public class CacheFileManager {
                     "and k1.lib_type in (1, 18)";
             st = conn.createStatement();
             rs = st.executeQuery(sql);
-            fw = new FileWriter(path + "common_sex_age_filter.dict");
+            fw = new FileWriter(path + "graph_sex_age_filter.dict");
             String r1, r4, r5;
             while (rs.next()) {
                 r1 = rs.getString(1);//term name
@@ -229,7 +229,7 @@ public class CacheFileManager {
             st = conn.createStatement();
             rs = st.executeQuery(sql);
             List<Map.Entry<String, String>> libaryList = rsToMap(rs, true);
-            fw = new FileWriter(path + "common_diag_2_dept.dict");
+            fw = new FileWriter(path + "graph_diag_2_dept.dict");
             for (Map.Entry<String, String> entry : libaryList) {
                 fw.write(encrypDES.encrytor(entry.getKey() + "|" + entry.getValue()));
                 fw.write("\n");
@@ -239,7 +239,7 @@ public class CacheFileManager {
             sql = "SELECT g.diag_level1,g.diag_level2,g.diag_level3 FROM `kl_disease_normalize` g ";
             st = conn.createStatement();
             rs = st.executeQuery(sql);
-            fw = new FileWriter(path + "diag_normalize.dict");
+            fw = new FileWriter(path + "graph_diag_normalize.dict");
             while (rs.next()) {
                 r1 = rs.getString(1);
                 r2 = rs.getString(2);
@@ -274,7 +274,7 @@ public class CacheFileManager {
             rs = st.executeQuery(sql);
             List<Map.Entry<String, String>> libraryList = rsToMap(rs, true);
 
-            FileWriter fw = new FileWriter(path + "common_diag_2_dept.dict");
+            FileWriter fw = new FileWriter(path + "bigdata_diag_2_dept.dict");
             for (Map.Entry<String, String> entry : libraryList) {
                 fw.write(encrypDES.encrytor(entry.getKey() + "|" + entry.getValue()));
                 fw.write("\n");
@@ -288,7 +288,7 @@ public class CacheFileManager {
                     "and k1.lib_type in (1, 18)";
             st = conn.createStatement();
             rs = st.executeQuery(sql);
-            fw = new FileWriter(path + "doc_result_mapping_filter.dict");
+            fw = new FileWriter(path + "bigdata_sex_age_filter.dict");
 
             String r1, r2, r3, r4, r5;
             while (rs.next()) {
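
All of the writer loops in this file share one shape: each dict file is a sequence of DES-encrypted key|value lines. A compact sketch of that shape using try-with-resources, so the file is closed even when a write fails (EncrypDES names the type behind the encrypDES.encrytor(...) calls above and is otherwise an assumption):

    // requires java.io.FileWriter, java.util.List, java.util.Map
    static void writeDict(String path, List<Map.Entry<String, String>> rows,
                          EncrypDES encrypDES) throws Exception {
        try (FileWriter fw = new FileWriter(path)) {
            for (Map.Entry<String, String> entry : rows) {
                fw.write(encrypDES.encrytor(entry.getKey() + "|" + entry.getValue()));
                fw.write("\n");
            }
        }
    }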

+ 43 - 21
graph/src/main/java/org/diagbot/graph/util/CacheUtil.java

@@ -9,7 +9,6 @@ import java.util.List;
 import java.util.Map;
 
 public class CacheUtil {
-    public static Map<String,String> lexionMap=null;
     public static Map<String,Map<String,String>> sexAgeMap=null;
     public static Map<String,String> vitalMap=null;
     public static Map<String,String> disgSortMap=null;
@@ -20,7 +19,14 @@ public class CacheUtil {
 
     //disease-to-department mapping, cached
     public static Map<String,String> getDiagDepartCache(){
-        String path = "doc_result_mapping_diag.dict";
+        if (doc_result_mapping_diag_map == null) {
+            createDiagDepartCache();
+        }
+        return doc_result_mapping_diag_map;
+    }
+
+    public static Map<String,String> createDiagDepartCache(){
+        String path = "graph_diag_2_dept.dict";
         Configuration configuration = new DefaultConfig();
         if(doc_result_mapping_diag_map == null){
             doc_result_mapping_diag_map = new HashMap<>();
@@ -35,7 +41,14 @@ public class CacheUtil {
 
     //disease grading lookup, cached
     public static Map<String,Integer> getDiagClassifyJiCache(){
-        String path = "diagClassify.dict";
+        if (diagClassifyJiMap == null) {
+            createDiagClassifyJiCache();
+        }
+        return diagClassifyJiMap;
+    }
+
+    public static Map<String,Integer> createDiagClassifyJiCache(){
+        String path = "graph_diag_normalize.dict";
         Configuration configuration = new DefaultConfig();
         if(diagClassifyJiMap == null){
             diagClassifyJiMap = new HashMap<>();
@@ -65,9 +78,17 @@ public class CacheUtil {
         }
         return diagClassifyJiMap;
     }
+
     //disease normalization lookup, cached
     public static Map<String,String> getDiagClassifyCache(){
-        String path = "diagClassify.dict";
+        if (diagClassifyMap == null) {
+            createDiagClassifyCache();
+        }
+        return diagClassifyMap;
+    }
+
+    public static Map<String,String> createDiagClassifyCache(){
+        String path = "graph_diag_normalize.dict";
         Configuration configuration = new DefaultConfig();
         if(diagClassifyMap == null){
             diagClassifyMap = new HashMap<>();
@@ -81,7 +102,7 @@ public class CacheUtil {
                     bigdiagName = splits[1].trim();//disease umbrella category
                     split = bigdiagName.split("\\、");
                     for (String dis:split
-                         ) {
+                            ) {
                         diagClassifyMap.put(dis, diagName);
                     }
 
@@ -108,7 +129,7 @@ public class CacheUtil {
     public static Map<String,String> createDiagSortCache(){
         disgSortMap = new HashMap<>();
 
-        String path = "diagSort.dict";
+        String path = "graph_diag_classify.dict";
         Configuration configuration = new DefaultConfig();
         List<String> contentList = configuration.readFileContents(path);
         for (String s:contentList) {
@@ -120,22 +141,15 @@ public class CacheUtil {
         return disgSortMap;
     }
 
-    public static Map<String,String> getLexionCache(){
-        String path = "lexicon.dict";
-        Configuration configuration = new DefaultConfig();
-        if(lexionMap == null){
-            lexionMap = new HashMap<>();
-            List<String> contentList = configuration.readFileContents(path);
-            for (String s:contentList) {
-                String[] splits = s.split("\\|");
-                lexionMap.put(splits[0],splits[1]);
-            }
-
+    public static Map<String,Map<String,String>> getSexAgeCache(){
+        if (sexAgeMap == null) {
+            createSexAgeCache();
         }
-        return lexionMap;
+        return sexAgeMap;
     }
-    public static Map<String,Map<String,String>> getSexAgeCache(){
-        String path = "sexAge.dict";
+
+    public static Map<String,Map<String,String>> createSexAgeCache(){
+        String path = "graph_sex_age_filter.dict";
         Configuration configuration = new DefaultConfig();
         if(sexAgeMap == null){
             sexAgeMap = new HashMap<>();
@@ -154,8 +168,16 @@ public class CacheUtil {
         }
         return sexAgeMap;
     }
+
     public static Map<String,String> getVitalCache(){
-        String path = "vital.dict";
+        if (vitalMap == null) {
+            createVitalCache();
+        }
+        return vitalMap;
+    }
+
+    public static Map<String,String> createVitalCache(){
+        String path = "graph_vital_convert.dict";
         Configuration configuration = new DefaultConfig();
         if(vitalMap == null){
             vitalMap = new HashMap<>();
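
Each getXxxCache()/createXxxCache() pair in this file follows the same read path: load a pipe-delimited dict file and split every line into a map entry. A minimal sketch of that path, reusing the Configuration/DefaultConfig API seen above (the length guard is an addition; the originals index splits[1] unconditionally):

    // requires java.util.HashMap, java.util.List, java.util.Map
    public static Map<String, String> loadPipeDict(String path) {
        Map<String, String> map = new HashMap<>();
        Configuration configuration = new DefaultConfig();
        List<String> contentList = configuration.readFileContents(path);
        for (String s : contentList) {
            String[] splits = s.split("\\|");
            if (splits.length >= 2) { // skip malformed lines instead of throwing
                map.put(splits[0], splits[1]);
            }
        }
        return map;
    }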

+ 12 - 4
push-web/src/main/java/org/diagbot/push/controller/CacheFileManagerController.java

@@ -1,5 +1,6 @@
 package org.diagbot.push.controller;
 
+import org.diagbot.bigdata.common.ApplicationCacheUtil;
 import org.diagbot.common.push.cache.CacheFileManager;
 import org.diagbot.common.work.ResponseData;
 import org.diagbot.graph.util.CacheUtil;
@@ -25,16 +26,23 @@ public class CacheFileManagerController extends BaseController {
         Response<ResponseData> response = new Response<ResponseData>();
         CacheFileManager cacheFileManager = new CacheFileManager();
         cacheFileManager.createCacheFile();
-
+        //refresh NLP caches
         NlpCache.createSegmentCache();
         NlpCache.createPushCache();
         NlpCache.createClassifyCache();
         NlpCache.createChronicCache();
         NlpCache.createPropertyPairCache();
-
+        //refresh knowledge-graph caches
         CacheUtil.createDiagSortCache();
-        CacheUtil.getSexAgeCache();
-        CacheUtil.getVitalCache();
+        CacheUtil.createSexAgeCache();
+        CacheUtil.createVitalCache();
+        CacheUtil.createDiagClassifyCache();
+        CacheUtil.createDiagClassifyJiCache();
+        CacheUtil.createDiagDepartCache();
+        //refresh big-data caches
+        ApplicationCacheUtil.createDoc_result_mapping_diag_map();
+        ApplicationCacheUtil.createDoc_result_mapping_filter_map();
+        ApplicationCacheUtil.createKl_result_mapping_standword_map();
         return response;
     }
 }
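
Nothing in the endpoint above serializes concurrent refreshes: two overlapping requests would rebuild every cache twice, interleaved. A minimal guard sketch, assuming a second caller may simply return while a refresh is already running (names are illustrative, not part of this commit):

    // requires java.util.concurrent.atomic.AtomicBoolean
    private static final AtomicBoolean REFRESHING = new AtomicBoolean(false);

    public Response<ResponseData> updateAllCaches() {
        Response<ResponseData> response = new Response<ResponseData>();
        if (!REFRESHING.compareAndSet(false, true)) {
            return response; // a refresh is already in progress
        }
        try {
            new CacheFileManager().createCacheFile();
            // ... NlpCache / CacheUtil / ApplicationCacheUtil create* calls as above ...
        } finally {
            REFRESHING.set(false); // always release the guard
        }
        return response;
    }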