@@ -1,16 +1,22 @@
 package org.diagbot.bigdata.work;
 
 import org.algorithm.util.AlgorithmClassify;
+import org.diagbot.bigdata.common.ApplicationCacheUtil;
 import org.diagbot.bigdata.util.BigDataConstants;
 import org.diagbot.common.work.SearchData;
 import org.diagbot.nlp.feature.FeatureAnalyze;
 import org.diagbot.nlp.feature.FeatureType;
+import org.diagbot.nlp.participle.ParticipleUtil;
+import org.diagbot.nlp.participle.word.Lexeme;
+import org.diagbot.nlp.participle.word.LexemePath;
 import org.diagbot.nlp.util.Constants;
+import org.diagbot.nlp.util.NegativeEnum;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.util.StringUtils;
 
 import javax.servlet.http.HttpServletRequest;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -24,6 +30,8 @@ import java.util.Map;
 **/
 public class ParamsDataProxy {
     Logger logger = LoggerFactory.getLogger(ParamsDataProxy.class);
+    ParticipleUtil util = new ParticipleUtil();
+
     public void createSearchData(HttpServletRequest request, SearchData searchData) throws Exception {
         //strip whitespace
         if (searchData.getSymptom() != null) {
@@ -41,9 +49,15 @@ public class ParamsDataProxy {
         if (StringUtils.isEmpty(searchData.getResourceType())) {
             searchData.setResourceType(BigDataConstants.resource_type_o);
         }
+        //append diagnosis-basis standard words to the end of the symptom text
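+        //e.g. if the segmenter tags "血压" / "140/90" / "mmHg" as vital-index / digits / unit and the cached
+        //standword map holds a matching threshold entry, addStandWord() appends the mapped standard word here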
+        ApplicationCacheUtil.getKl_result_mapping_standword_map();
+        LexemePath<Lexeme> featureData = util.participle(searchData.getSymptom());
+        if (null != featureData) {
+            addStandWord(featureData, ApplicationCacheUtil.kl_result_mapping_standword_map, searchData);
+        }
         //include all available information in the push
         searchData.setSymptom(searchData.getSymptom() + searchData.getVital()
-                + searchData.getLis() + searchData.getPacs() + searchData.getPast() + searchData.getOther()+ searchData.getIndications());
+                + searchData.getLis() + searchData.getPacs() + searchData.getPast() + searchData.getOther() + searchData.getIndications());
         searchData.setSymptom(searchData.getSymptom().trim());
         //push several feature categories in a single request
         String[] featureTypes = searchData.getFeatureType().split(",");
@@ -69,6 +83,7 @@ public class ParamsDataProxy {
 
     /**
      * convert featureType to the algorithm model type
+     *
      * @param sysCode
      * @param featureTypes
      * @param searchData
@@ -92,7 +107,7 @@ public class ParamsDataProxy {
                 case DIAG:
                     if (reverse) {
                         classifies[i] = null;
-                    } else {
+                    } else {
                         classifies[i] = AlgorithmClassify.NEURAL_DIAG;
                     }
                     break;
@@ -139,6 +154,7 @@ public class ParamsDataProxy {
 
     /**
      * featureType from an external system must be converted into the featureType defined by the big-data service
+     *
      * @param sysCode
      * @param featureType
      * @return
@@ -179,7 +195,7 @@ public class ParamsDataProxy {
      */
     private void paramFeatureInit(SearchData searchData, List<Map<String, Object>> featuresList) throws Exception {
         if (featuresList != null && featuresList.size() > 0) {
-//            BeanUtils.setProperty(searchData, property_list, featuresList);
+            // BeanUtils.setProperty(searchData, property_list, featuresList);
             Map<String, Object> featureMap = null;
             for (int i = 0; i < featuresList.size(); i++) {
                 featureMap = featuresList.get(i);
@@ -199,4 +215,136 @@ public class ParamsDataProxy {
             }
         }
     }
+
+    /**
+     * Append diagnosis-basis standard words to the end of the symptom in SearchData
+     *
+     * @param lexemes
+     * @param standWords
+     * @param sData
+     * @return
+     */
+    public SearchData addStandWord(List<Lexeme> lexemes, Map<String, List<Map<String, String>>> standWords, SearchData sData) {
+        List<Lexeme> feature = new ArrayList<>();
+        //collect the vital-sign indicators and their values (digits, time, unit) from the segmentation result
+        for (Lexeme lexeme : lexemes) {
+            NegativeEnum lexemeNegativeEnum = NegativeEnum.parseOfValue(lexeme.getProperty());
+            if (lexemeNegativeEnum == NegativeEnum.VITAL_INDEX || lexemeNegativeEnum == NegativeEnum.DIGITS
+                    || lexemeNegativeEnum == NegativeEnum.EVENT_TIME || lexemeNegativeEnum == NegativeEnum.UNIT) {
+                feature.add(lexeme);
+            }
+        }
+        //join each collected vital-sign indicator with its value (digits) and, when present, the following time/unit
+        List<String> featureType = new ArrayList<>();
+        for (int i = 0; i < feature.size(); i++) {
+            boolean featureTypeState = true;
+            if (i < feature.size() - 2) {
+                if ((NegativeEnum.parseOfValue(feature.get(i).getProperty()) == NegativeEnum.VITAL_INDEX
+                        && NegativeEnum.parseOfValue(feature.get(i + 1).getProperty()) == NegativeEnum.DIGITS
+                        && NegativeEnum.parseOfValue(feature.get(i + 2).getProperty()) == NegativeEnum.EVENT_TIME)
+                        || (NegativeEnum.parseOfValue(feature.get(i).getProperty()) == NegativeEnum.VITAL_INDEX
+                        && NegativeEnum.parseOfValue(feature.get(i + 1).getProperty()) == NegativeEnum.DIGITS
+                        && NegativeEnum.parseOfValue(feature.get(i + 2).getProperty()) == NegativeEnum.UNIT)) {
+                    featureType.add(feature.get(i).getText() + "\t" + feature.get(i + 1).getText() + "\t"
+                            + feature.get(i + 2).getText());
+                    featureTypeState = false;
+                }
+            }
+            if (i < feature.size() - 1 && featureTypeState) {
+                if (NegativeEnum.parseOfValue(feature.get(i).getProperty()) == NegativeEnum.VITAL_INDEX
+                        && NegativeEnum.parseOfValue(feature.get(i + 1).getProperty()) == NegativeEnum.DIGITS) {
+                    featureType.add(feature.get(i).getText() + "\t" + feature.get(i + 1).getText());
+                }
+            }
+        }
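+        //NOTE: each entry of standWords is assumed to carry keys such as "op", "value", "unit" and "standword"
+        //(as read below and in judgment()), e.g. {"op": ">", "value": "140/90", "unit": "mmHg", "standword": ...};
+        //the actual cache format is defined by ApplicationCacheUtil.kl_result_mapping_standword_map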
+        //compare the numeric value of each standard word with the numeric value found in the segmentation result
+        for (String f : featureType) {
+            String[] features = f.split("\t");
+            if (standWords.containsKey(features[0])) {
+                List<Map<String, String>> standWordList = standWords.get(features[0]);
+                for (Map<String, String> standWordMap : standWordList) {
+                    if (standWordMap.containsKey("unit") && standWordMap.containsKey("value")) {
+                        if (features.length == 2) {
+                            judgment(sData, features, standWordMap);
+                        } else {
+                            if (standWordMap.get("unit").equals(features[2])) {
+                                judgment(sData, features, standWordMap);
+                            }
+                        }
+                    } else if (standWordMap.containsKey("value")) {
+                        if (features.length == 2) {
+                            judgment(sData, features, standWordMap);
+                        }
+                    }
+                }
+            }
+        }
+        return sData;
+    }
+
+    //compare the numeric value from the segmentation result against the standard word's threshold value and,
+    //when it is exceeded (or undercut), append the standard word to the symptom
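+    //e.g. features = ["血压", "142/95"] with a hypothetical entry {"op": ">", "value": "140/90", "standword": ...}
+    //gives 142 > 140 and 95 > 90, so the standard word is appended to sData's symptom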
+    private void judgment(SearchData sData, String[] features, Map<String, String> standWordMap) {
+        if (">".equals(standWordMap.get("op"))) {
+            //handle cases like blood pressure ≥140/90mmHg separately
+            if (features[1].contains("/")) {
+                if (standWordMap.get("value").contains("/")) {
+                    String[] feature = features[1].split("/");
+                    Integer featuresSBP = Integer.valueOf(feature[0]);   //systolic pressure from segmentation
+                    Integer featuresDBP = Integer.valueOf(feature[1]);   //diastolic pressure from segmentation
+
+                    String[] values = standWordMap.get("value").split("/");
+                    Integer standWordSBP = Integer.valueOf(values[0]);   //systolic pressure of the standard word
+                    Integer standWordDBP = Integer.valueOf(values[1]);   //diastolic pressure of the standard word
+                    if (featuresSBP > standWordSBP && featuresDBP > standWordDBP) {
+                        sData.setSymptom(sData.getSymptom() + "," + standWordMap.get("standword"));
+                        System.out.println(sData.getSymptom());
+                    }
+                }
+            } else {
+                String num = getNum(standWordMap.get("value"));
+                if (Double.valueOf(getNum(features[1])) > Double.valueOf(num)) {
+                    sData.setSymptom(sData.getSymptom() + "," + standWordMap.get("standword"));
+                    System.out.println(sData.getSymptom());
+                }
+            }
+        } else if ("<".equals(standWordMap.get("op"))) {
+            //handle cases like blood pressure below 90/60mmHg separately
+            if (standWordMap.get("value").contains("/")) {
+                if (features[1].contains("/")) {
+                    String[] feature = features[1].split("/");
+                    Integer featuresSBP = Integer.valueOf(feature[0]);   //systolic
+                    Integer featuresDBP = Integer.valueOf(feature[1]);   //diastolic
+
+                    String[] values = standWordMap.get("value").split("/");
+                    Integer standWordSBP = Integer.valueOf(values[0]);   //systolic
+                    Integer standWordDBP = Integer.valueOf(values[1]);   //diastolic
+                    if (featuresSBP < standWordSBP && featuresDBP < standWordDBP) {
+                        sData.setSymptom(sData.getSymptom() + "," + standWordMap.get("standword"));
+                        System.out.println(sData.getSymptom());
+                    }
+                }
+            } else {
+                String num = getNum(standWordMap.get("value"));
+                if (Double.valueOf(getNum(features[1])) < Double.valueOf(num)) {
+                    sData.setSymptom(sData.getSymptom() + "," + standWordMap.get("standword"));
+                    System.out.println(sData.getSymptom());
+                }
+            }
+        }
+    }
+
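+    //extract the digit characters from a value string, e.g. getNum("140mmHg") returns "140"; note that every
+    //non-digit character (including a decimal point) is dropped before the remaining digits are concatenated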
+    private String getNum(String standWord) {
+        StringBuffer sb = new StringBuffer();
+        for (String num : standWord.replaceAll("[^0-9]", ",").split(",")) {
+            if (num.length() > 0) {
+                sb.append(num);
+            }
+        }
+        return sb.toString();
+    }
+
 }