Commit afd9953a authored by haoyanbin

Audio

parent b1c6043f
@@ -11,10 +11,11 @@ def create_app(test_config=None):
DATABASE=os.path.join(app.instance_path, 'apps.sqlite'),
)
from app.controller import search,importdata,neodata,auth
app.register_blueprint(search.bp)
# from app.controller import search,importdata,neodata,auth
from app.controller import importdata,auth
# app.register_blueprint(search.bp)
app.register_blueprint(importdata.bp)
app.register_blueprint(neodata.bp)
# app.register_blueprint(neodata.bp)
app.register_blueprint(auth.bp)
if test_config is None:
......
@@ -3,15 +3,737 @@ import os
import random
import re
import time
from flask import Blueprint, json
from flask import Blueprint, json, request
import requests
from app.model.base import dbconn, dbconn2, dbconn3
from bs4 import BeautifulSoup
from http import HTTPStatus
from dashscope.audio.asr import Transcription
import json  # note: this shadows the flask json imported above; both provide loads/dumps
bp = Blueprint('importdata', __name__, url_prefix='/importdata')
@bp.route('/audio_test', methods=['POST', 'GET'])
def audio_test():
req = request.get_json()
# If the API key is not configured in an environment variable, uncomment the line below and replace the value with your own API key
import dashscope
dashscope.api_key = "sk-d73786459e4940cfba6bd0e48d8d28cd"
transcribe_response = Transcription.async_call(
model='paraformer-v2',
file_urls=[req['file_url']],
language_hints=['zh', 'en'],  # language_hints is only supported by the paraformer-v2 model
diarization_enabled=True,
speaker_count=req['speaker_count']
)
# Poll until the async task finishes; a short sleep keeps the loop from hammering the API.
while True:
if transcribe_response.output.task_status in ('SUCCEEDED', 'FAILED'):
break
time.sleep(1)
transcribe_response = Transcription.fetch(task=transcribe_response.output.task_id)
if transcribe_response.status_code == HTTPStatus.OK:
print(json.dumps(transcribe_response.output, indent=4, ensure_ascii=False))
print('transcription done!')
return transcribe_response.output
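# A minimal client sketch for the endpoint above (assumes the app is running on
# localhost:8000 as in run.py; the file URL and speaker count are placeholders):
#
#   import requests
#   resp = requests.post('http://localhost:8000/importdata/audio_test',
#                        json={'file_url': 'https://example.com/sample.wav',
#                              'speaker_count': 2})
#   print(resp.json())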
@bp.route('/ai_test3')
def ai_test3():
url = "http://47.94.203.143:9001/api_v2/ai_format"
msg = '''```
发言人1:下午好,小公主叫什么名字?看起来有点心事。
发言人2:她叫绵绵,最近总用后腿蹬耳朵,毛都抓秃了。
发言人1:幼儿里有好多咖啡渣样的分泌物,这种情况持续多久了?洗澡时有进水吗?
发言人2:大概两周多,上周去宠物店美容的时候,就开始频繁的甩了个头,但是店员说用的是干燥箱。
发言人1:平时是散养还是关龙?家里有其他宠物吗?
发言人2:他在家都是自由活动的,家里面还有只布偶,不过那只最近眼睛发炎了。
发言人1:绵绵好乖,我们做个快速染色,两小只。平时会互相舔毛吗?最近换猫砂或清洁剂吗?
发言人2:他们俩的关系还是特别好的。猫砂从膨润土换成了豆腐砂,一个多月了。这和耳朵有关。
发言人1:您看这些椭圆形的包子,这是马拉色菌过度繁殖的表现,不过别担心。嗯,这个喷嚏频率高吗?
发言人2:对,最近打喷嚏的次数好像变多了,我还以为他是换季着凉了。
发言人1:可能需要补充做个孢疹病毒试纸检测。虽然疫苗齐全,但多猫环境容易。绵绵的鸳鸯眼真特别是天生异瞳吗?
发言人2:领养的时候,救助的人说这是虹膜异色症,当时就觉得这张眼睛比较好看。
发言人1:病毒检测阴性可以排除猫鼻支,综合来看是耳道真菌感染并轻微过敏,建议每周两次药浴,配合抗真菌第二剂。对了,暂时换回原来的猫砂会更稳妥。
发言人2:需要隔离吗?他们总睡同一个猫窝。
发言人1:最好分开睡垫。真菌孢子真菌孢子容易传通过织物传播。我给您开瓶环境消毒喷雾,记得重点处理抓板区域。呃,这是家里另外一位小祖宗吧,眼睛发炎的话可以顺带检查下。
发言人2:太好了,她叫雪球,我这就回去挂号,听到没回家不许再给姐姐舔耳朵了。
发言人1:下次带雪球来,记得提前近16小时,说不定要抽血检查过敏原。毕竟两位公主的排场可不能含糊。好玩。
```
'''
params = {
'msg': msg
}
reply = requests.post(url=url, json=params)
content_json = json.loads(reply.content)  # parsed but not stored; this route only exercises the ai_format API
return '1'
@bp.route('/ai_test2')
def ai_test2():
url = "http://47.94.203.143:9001/api_v2/qwen3"
msg = '''
现在有一份宠物医生与患病宠物的主人之间的对话,现有如下要求:
1.请判断出发言人1和发言人2的身份并替换掉原文中的名称;2.找出对话中不准确和不专业的单词并修改进原文,无需额外解释。
以下是对话内容:
```
发言人1:下午好,小公主叫什么名字?看起来有点心事。
发言人2:她叫绵绵,最近总用后腿蹬耳朵,毛都抓秃了。
发言人1:幼儿里有好多咖啡渣样的分泌物,这种情况持续多久了?洗澡时有进水吗?
发言人2:大概两周多,上周去宠物店美容的时候,就开始频繁的甩了个头,但是店员说用的是干燥箱。
发言人1:平时是散养还是关龙?家里有其他宠物吗?
发言人2:他在家都是自由活动的,家里面还有只布偶,不过那只最近眼睛发炎了。
发言人1:绵绵好乖,我们做个快速染色,两小只。平时会互相舔毛吗?最近换猫砂或清洁剂吗?
发言人2:他们俩的关系还是特别好的。猫砂从膨润土换成了豆腐砂,一个多月了。这和耳朵有关。
发言人1:您看这些椭圆形的包子,这是马拉色菌过度繁殖的表现,不过别担心。嗯,这个喷嚏频率高吗?
发言人2:对,最近打喷嚏的次数好像变多了,我还以为他是换季着凉了。
发言人1:可能需要补充做个孢疹病毒试纸检测。虽然疫苗齐全,但多猫环境容易。绵绵的鸳鸯眼真特别是天生异瞳吗?
发言人2:领养的时候,救助的人说这是虹膜异色症,当时就觉得这张眼睛比较好看。
发言人1:病毒检测阴性可以排除猫鼻支,综合来看是耳道真菌感染并轻微过敏,建议每周两次药浴,配合抗真菌第二剂。对了,暂时换回原来的猫砂会更稳妥。
发言人2:需要隔离吗?他们总睡同一个猫窝。
发言人1:最好分开睡垫。真菌孢子真菌孢子容易传通过织物传播。我给您开瓶环境消毒喷雾,记得重点处理抓板区域。呃,这是家里另外一位小祖宗吧,眼睛发炎的话可以顺带检查下。
发言人2:太好了,她叫雪球,我这就回去挂号,听到没回家不许再给姐姐舔耳朵了。
发言人1:下次带雪球来,记得提前近16小时,说不定要抽血检查过敏原。毕竟两位公主的排场可不能含糊。好玩。
```
'''
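# Sample the model repeatedly: post the same prompt 200 times and store each think/answer pair in ai_test.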
i = 0
while i < 200:
params = {
'msg': msg
}
reply = requests.post(url=url,json=params)
content_json = json.loads(reply.content)
i += 1
data1 = {
'api': "format6",
'prompt': msg,
'think': content_json['think'],
'result': content_json['answer']
}
dbconn.insert(table='ai_test', data=data1)
return '1'
@bp.route('/ai_test1')
def ai_test1():
url = "http://47.94.203.143:9001/api_v2/qwen3"
msg = '''
现在有一份宠物医生与患病宠物的主人之间的对话,现有如下要求:
请按照"KV"格式生成一条数据,其中K为【宠物名称,性别,年龄,主诉,病因,临床诊断,治疗方案,预后,医嘱】字段,V为对应的值,切每一组KV用【;】分隔,无需额外解释。
以下是对话内容:
```
医生:您好,请坐。宠物叫什么名字?
宠物主人:医生好,它叫球球,这两天一直拉肚子,精神状态也差。
医生:球球真乖。咱们先了解下情况,您发现它腹泻大概多久了?便便形态是水状还是糊状?
宠物主人:前天开始的,开始是软便,昨天变成了黄褐色的黏液状,今天早上还带有点血丝。
医生:最近有没有换过狗粮或者误食过骨头、塑料袋之类的东西?
宠物主人:上周换了新牌子的幼犬粮,不过按说明慢慢过渡的。对了,昨天他啃了我半块掉在地上的奶油蛋糕。
医生:突然的高脂肪食物可能刺激肠胃。体温量过吗?呕吐过吗?
宠物主人:今天早上用耳温计测试39.2度,然后呕吐两次胆汁样液体,现在连鸡肉干都不吃了。
医生:球球别怕,我们轻轻听一听。平时驱虫按时做吗?疫苗都打全了吗?
宠物主人:上个月刚去过去犬瘟热、细小病毒、腺病毒三联疫苗,今年加强过。然后会不会是细小?
医生:您观察的很仔细,腹部有明显压痛,不过咱们先别自己吓自己,我建议做个血常规和粪便PCR检测,这样能准确区分是细菌感染?寄生虫还是病毒性疾病?球球这名字真可爱,是有什么特别含义吗?
宠物主人:他小时候就喜欢追着网球跑,圆滚滚的像个小毛球。
医生:好消息,细小病毒检测阴性,白细胞升高明显。粪便里有大量致病性大肠杆菌,应该是饮食不当引发的急性肠炎。咱们先补充输液补充电解质,配合抗生素治疗。这两天胃肠道处方罐头,您看可以吗?
宠物主人:那太好了,那大概需要住几天?
医生:观察三天比较稳妥,我们24小时都有护理措施,护理到位了,回家可以准备些南瓜泥,对修复肠道粘膜有帮助。球球喜欢看窗外吗?我们住院部有全景玻璃房,输液时可以给他选个阳光充足的位置。
宠物主人:那太好了,他最喜欢晒太阳,麻烦您多费心了。
医生:好的。
```
'''
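# Same sampling pattern as ai_test2: 51 runs of one prompt, stored under api key "report3".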
i = 0
while i < 51:
params = {
'msg': msg
}
reply = requests.post(url=url,json=params)
content_json = json.loads(reply.content)
i += 1
data1 = {
'api': "report3",
'prompt': msg,
'think': content_json['think'],
'result': content_json['answer']
}
dbconn.insert(table='ai_test', data=data1)
return '1'
@bp.route('/ai_test_format')
def ai_test_format():
old_id = '0'
i = 0
size = 100
while i <= 10:
page = i * size
sql_str = 'SELECT id, api, result ' \
' FROM ai_test ' \
' where api = "format6" and id >' + old_id + \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn2.query(
sql=sql_str
)
i += 1
if len(rows) == 0:
break
for val1 in rows:
s1 = val1['result'].strip('\n').strip('```')
s1 = s1.strip('\n')
# if len(s1) < 2 :
# s1 = val1['result'].strip('\n').split(';')
is_restart = 0
# for val2 in s1:
# if val2 == '':
# continue
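# Discard outputs that still contain speaker tags ('发言人'); only fully reformatted transcripts are stored.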
if '发言人' in s1:
is_restart = 1
if is_restart == 0 :
data1 = {
'old_id' : val1['id'],
'api': val1['api'],
'result' : s1,
}
dbconn.insert(table='ai_test_res', data=data1)
return '1'
@bp.route('/ai_test_report1')
def ai_test_report1():
old_id = '1'
i = 0
size = 100
while i <= 10:
page = i * size
sql_str = 'SELECT id, api, result ' \
' FROM ai_test ' \
' where api = "report3" and id >' + old_id + \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn2.query(
sql=sql_str
)
i += 1
if len(rows) == 0:
break
for val1 in rows:
s1 = val1['result'].strip('\n').split(';')
if len(s1) < 2 :
s1 = val1['result'].strip('\n').split(';')
pet_name = ''
pet_sex = ''
pet_age = ''
chief_complaint = ''
clinical = ''
suspected_disease = ''
prognosis = ''
treatment = ''
doctors_advice = ''
for val2 in s1:
if val2 == '':
continue
s2 = val2.strip().split(':')
if len(s2) <= 1:
s2 = val2.strip().split(':')
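# Map each "key: value" pair onto a report field; the keys mirror the KV template in the ai_test1 prompt.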
if s2[0] == '宠物名称':
pet_name = s2[1]
if s2[0] == '性别':
pet_sex = s2[1]
if s2[0] == '年龄':
pet_age = s2[1]
if s2[0] == '主诉':
chief_complaint = s2[1]
if s2[0] == '病因':
suspected_disease = s2[1]
if s2[0] == '临床诊断':
clinical = s2[1]
if s2[0] == '治疗方案':
treatment = s2[1]
if s2[0] == '预后':
prognosis = s2[1]
if s2[0] == '医嘱':
doctors_advice = s2[1]
data1 = {
'old_id' : val1['id'],
'api': val1['api'],
'pet_name' : pet_name,
'pet_sex' : pet_sex,
'pet_age' : pet_age,
'chief_complaint' : chief_complaint,
'suspected_disease' : suspected_disease,
'clinical' : clinical,
'treatment' : treatment,
'prognosis' : prognosis,
'doctors_advice' : doctors_advice,
}
dbconn.insert(table='ai_report', data=data1)
return '1'
@bp.route('/data_ch_report')
def data_ch_report():
old_id = '466342'
i = 0
size = 500
while i <= 600:
page = i * size
sql_str = 'SELECT rp.id, rp.report_main_orgin_id, rp.species, rp.breed, rp.patient_gender_code, rp.weight, rp.is_sterilization, rp.chief_complaint'\
', rp.phys_examination, rp.suspected_disease, rp.treatment, rp.operation_record, rp.doctors_advice, rp.return_visit ' \
' FROM ch_report.ch_report_patient as rp ' \
' where rp.chief_complaint != "" and rp.id >' + old_id + \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn2.query(
sql=sql_str
)
i += 1
print(i)
if len(rows) == 0:
break
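# For each patient report, look up its evaluation record and merge the first match into the row before inserting.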
for val1 in rows:
sql_str2 = 'SELECT re.diagnosis_cate_name, re.diagnosis_subject, re.diagnosis_summary' \
' FROM ch_report.ch_report_evalute as re ' \
' where re.report_main_orgin_id ="' + val1['report_main_orgin_id'] +'"'
rows2 = dbconn2.query(
sql=sql_str2
)
data1 = {
'orgin_id': val1['report_main_orgin_id'],
'species': val1['species'],
'breed': val1['breed'],
'patient_gender_code': val1['patient_gender_code'],
'weight': val1['weight'],
'is_sterilization': val1['is_sterilization'],
'chief_complaint': val1['chief_complaint'],
'phys_examination': val1['phys_examination'],
'suspected_disease': val1['suspected_disease'],
'treatment': val1['treatment'],
'operation_record': val1['operation_record'],
'doctors_advice': val1['doctors_advice'],
'return_visit': val1['return_visit'],
'old_id': val1['id'],
}
if len(rows2) >0 :
data1['diagnosis_cate_name'] = rows2[0]['diagnosis_cate_name']
data1['diagnosis_subject'] = rows2[0]['diagnosis_subject']
data1['diagnosis_summary'] = rows2[0]['diagnosis_summary']
dbconn.insert(table='ch_data4', data=data1)
return '1'
@bp.route('/xd_cate_info_rpl')
def xd_cate_info_rpl():
sql_str = 'SELECT id, content FROM xd_cate_info WHERE content like "%&nbsp;%" '
rows = dbconn.query(
sql=sql_str
)
for v in rows:
content = v['content'].replace("&nbsp;","")
data1 = {
'content' : content
}
dbconn.update(table='xd_cate_info', data=data1,condition={"id":v['id']})
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/xd_data_res')
def xd_data_res():
sql_str = 'SELECT xc.id, xc.name ' \
' FROM xd_cate_info as xci left join xd_cate as xc on xci.xd_cate_id = xc.id ' \
' WHERE xc.children = 4 ' \
' group by xd_cate_id '
rows = dbconn.query(
sql=sql_str
)
for v in rows:
sql_str2 = 'SELECT title, content ' \
' FROM xd_cate_info ' \
' WHERE xd_cate_id = '+ str(v['id'])
rows2 = dbconn.query(
sql=sql_str2
)
content = ''
for v2 in rows2:
content += v2['title']+v2['content']+';'
sql_str3 = 'SELECT xc2.name ' \
' FROM xd_cate as xc left join xd_cate as xc2 on xc.pid = xc2.id ' \
' WHERE xc.id = '+ str(v['id'])
rows3 = dbconn.query(
sql=sql_str3
)
name_p = ''
if rows3[0]['name'] != '症状描述':
name_p = rows3[0]['name']
sql_str4 = 'SELECT xt.name, xt.introduce, xt.goods_name ' \
' FROM xd_cate_treatment as xct left join xd_treatment as xt on xt.id = xct.treatment_id ' \
' WHERE xct.cate_id = '+ str(v['id'])
rows4 = dbconn.query(
sql=sql_str4
)
treatment = ''
for v4 in rows4:
treatment += v4['name']+':'+v4['introduce']+'\n治疗方案:'+v4['goods_name']+';'
data1 = {
'name': v['name'],
'name_p': name_p,
'content':content,
'treatment':treatment
}
# messages.append(data1)
dbconn.insert(table='xd_data_res', data=data1)
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/data_ill_res3')
def data_ill_res3():
#' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
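# Join keyword/clinical pairs to lab categories and concatenate the category names, which serve as the suspected diseases downstream.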
sql_str = 'SELECT kc.id, lcs.lab_category_id, kc.keyword, kc.clinical, GROUP_CONCAT(lc.name order by lc.name) as name' \
' FROM keyword_clinical as kc ' \
' left join lab_category_sympeons as lcs on kc.clinical = lcs.name ' \
' left join lab_category as lc on lc.id = lcs.lab_category_id ' \
' WHERE lcs.lab_category_id > 0 and kc.id > 0 group by kc.keyword, kc.clinical'
rows = dbconn.query(
sql=sql_str
)
for v in rows:
chief_complaint = replace_self(v['keyword'])
phys_examination = replace_self(v['clinical'])
suspected_diseases = replace_self(v['name'])
sql_str2 = 'SELECT illness_name, drug_name ' \
' FROM data_illness_durg ' \
' WHERE drug_name != "" and cate_id = '+ str(v['lab_category_id'])
rows2 = dbconn.query(
sql=sql_str2
)
treatment = ''
for v2 in rows2:
treatment += v2['illness_name']+':'+v2['drug_name']+';'
sql_str3 = 'SELECT data ' \
' FROM data_illness_note ' \
' WHERE data != "" and cate_id = '+ str(v['lab_category_id'])
rows3 = dbconn.query(
sql=sql_str3
)
doctors_advice = ''
for v3 in rows3:
doctors_advice += v3['data']+';'
data1 = {
'chief_complaint': chief_complaint,
'phys_examination': phys_examination,
'suspected_disease': suspected_diseases,
'treatment': treatment,
'doctors_advice': doctors_advice,
'old_id':v['id']
}
# messages.append(data1)
dbconn.insert(table='ill_data_res3', data=data1)
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/data_ill_res2')
def data_ill_res2():
#' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
sql_str = 'SELECT kc.id, lcs.lab_category_id, kc.keyword, kc.clinical, lc.name ' \
' FROM keyword_clinical as kc ' \
' left join lab_category_sympeons as lcs on kc.clinical = lcs.name ' \
' left join lab_category as lc on lc.id = lcs.lab_category_id ' \
' WHERE kc.id > 0'
rows = dbconn.query(
sql=sql_str
)
for v in rows:
chief_complaint = replace_self(v['keyword'])
phys_examination = replace_self(v['clinical'])
suspected_diseases = replace_self(v['name'])
sql_str2 = 'SELECT illness_name, drug_name ' \
' FROM data_illness_durg ' \
' WHERE drug_name != "" and cate_id = '+ str(v['lab_category_id'])
rows2 = dbconn.query(
sql=sql_str2
)
treatment = ''
for v2 in rows2:
treatment += v2['illness_name']+':'+v2['drug_name']+';'
sql_str3 = 'SELECT data ' \
' FROM data_illness_note ' \
' WHERE data != "" and cate_id = '+ str(v['lab_category_id'])
rows3 = dbconn.query(
sql=sql_str3
)
doctors_advice = ''
for v3 in rows3:
doctors_advice += v3['data']+';'
data1 = {
'chief_complaint': chief_complaint,
'phys_examination': phys_examination,
'suspected_disease': suspected_diseases,
'treatment': treatment,
'doctors_advice': doctors_advice,
'old_id':v['id']
}
# messages.append(data1)
dbconn.insert(table='ill_data_res2', data=data1)
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/data_ill_res')
def data_ill_res():
#' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
sql_str = 'SELECT id, cate_id, name, data ' \
' FROM data_illness_symptoms ' \
' WHERE data != "" and id > 0'
rows = dbconn.query(
sql=sql_str
)
for v in rows:
chief_complaint = replace_self(v['data'])
suspected_diseases = replace_self(v['name'])
sql_str2 = 'SELECT illness_name, drug_name ' \
' FROM data_illness_durg ' \
' WHERE drug_name != "" and cate_id = '+ str(v['cate_id'])
rows2 = dbconn.query(
sql=sql_str2
)
treatment = ''
for v2 in rows2:
treatment += v2['illness_name']+':'+v2['drug_name']+';'
sql_str3 = 'SELECT data ' \
' FROM data_illness_note ' \
' WHERE data != "" and cate_id = '+ str(v['cate_id'])
rows3 = dbconn.query(
sql=sql_str3
)
doctors_advice = ''
for v3 in rows3:
doctors_advice += v3['data']+';'
data1 = {
'chief_complaint': chief_complaint,
'suspected_disease': suspected_diseases,
'treatment': treatment,
'doctors_advice': doctors_advice,
'old_id':v['id']
}
# messages.append(data1)
dbconn.insert(table='ill_data_res', data=data1)
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/new_tem')
def new_tem():
i = 0
size = 100
while i <= 10:
page = i * size
i += 1
sql_str = 'select `tem_name`, `describe`, `diagnosis` from medical_platform.dcm_tem ' \
' where tem_type = 1 and lang = 1 and id >92' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn3.query(
sql=sql_str
)
if len(rows) == 0:
break
for val in rows:
data1 = {
'tem_name': Translate(val['tem_name']),
'describe': Translate(val['describe']),
'diagnosis': Translate(val['diagnosis']),
'user_id': 1,
'lang': 2,
'tem_type': 1,
}
dbconn.insert(table='medical_platform.ill_data_res', data=data1)
return '1'
def Translate(data):
if data == "":
return ""
source_lang ="zh"
target_lang ="cht"
trans_type ="y"
TranslateToken = "JSY5jBTdd73iOO/6oFYyy/eGmVwip0NwrvLFeGvYzJy7PWpCJTn8euCYEvjZK3Xl"
url = 'https://translate.wuyoufanyi.com/api/Translate'
headers = {
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0",
"Authorization":"Bearer "+TranslateToken
}
params = {"text":data, "source_lang": source_lang, "target_lang":target_lang,"trans_type":trans_type}
result = requests.request('POST', url=url, headers=headers, json=params)
rows = json.loads(result.text)
return rows['data']
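# Example call (hypothetical strings; the service is configured above for zh -> traditional Chinese):
#   Translate('病情稳定')  # expected to return something like '病情穩定'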
@bp.route('/data_getimage')
def data_getimage():
i = 0
@@ -479,7 +1201,7 @@ def data_med():
def data_cate_goods():
i = 1
size = 100
while i <= 48:
while i <= 2:
page = (i-1) * size
sql_str = 'select xc.id, reply from xd_data1 as xd left join xd_cate as xc on xd.old_id = xc.old_id limit ' + str(page) + ', ' + str(size)
@@ -590,8 +1312,8 @@ def rep_xd_data2():
url = 'https://his.betacat.co/api/pet/v1/admin/goods/center/list?categoryId=0&pageSize=10&pageIndex='+str(i)+'&categoryName=&searchKeys=&activeStatus=-1'
headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNWRlMDNhMDAtMTM0Mi0xMWVmLWI1NTctYmRhMGJjZmYyMTFjIiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNyAwOTozOTo1OCIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE2IDA5OjM5OjU4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.IXuN9Fmc2tcR0208SvvvMzoLVPOBZ6kPhCdoV0Y_lXg",
"Cookie": "Hm_lvt_58a76bea9bf2c966c440612843fe56ef=1713406021,1715823556; status=false; Hm_lpvt_58a76bea9bf2c966c440612843fe56ef=1715836124; loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNWRlMDNhMDAtMTM0Mi0xMWVmLWI1NTctYmRhMGJjZmYyMTFjIiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNyAwOTozOTo1OCIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE2IDA5OjM5OjU4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.IXuN9Fmc2tcR0208SvvvMzoLVPOBZ6kPhCdoV0Y_lXg",
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiMTI4Mzg4ZTAtZjVjNC0xMWVmLTkzYzctODc1MWVmMDg4YWU0IiwiY3JlYXRlZEF0IjoiMjAyNS0wMi0yOCAxOTowNjo0OCIsImVtcGxveWVlTm8iOiIyMzA2MzE2NTUxNzQ4MzYyMjQiLCJleHBpcmVkQXQiOiIyMDI1LTAzLTMwIDE5OjA2OjQ4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5byg5ZCJ5pyLIiwicGVybWlzc2lvbkxpc3QiOltdLCJwaG9uZSI6IjE3NzgzMzE2MTI3Iiwic2hvcE5vIjoiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIiwic2hvcE5vcyI6WyIyMDIzMDIwNjE1MTkwNjE2NDAwMTAwMDEiXSwic2hvcFJvbGVJRHMiOls3MCw3MCw3MF0sInNob3BSb2xlTGlzdCI6W3siSUQiOjcwLCJUaXRsZSI6IueuoeeQhuWRmCJ9XSwic2hvcFRpdGxlIjoi6L-F5b635a6i5pyNIn0.QfcWmQD2btrAZygVgkNbaV4naDkLLhkqaNOZB3FsINo",
"Cookie": "loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiMTI4Mzg4ZTAtZjVjNC0xMWVmLTkzYzctODc1MWVmMDg4YWU0IiwiY3JlYXRlZEF0IjoiMjAyNS0wMi0yOCAxOTowNjo0OCIsImVtcGxveWVlTm8iOiIyMzA2MzE2NTUxNzQ4MzYyMjQiLCJleHBpcmVkQXQiOiIyMDI1LTAzLTMwIDE5OjA2OjQ4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5byg5ZCJ5pyLIiwicGVybWlzc2lvbkxpc3QiOltdLCJwaG9uZSI6IjE3NzgzMzE2MTI3Iiwic2hvcE5vIjoiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIiwic2hvcE5vcyI6WyIyMDIzMDIwNjE1MTkwNjE2NDAwMTAwMDEiXSwic2hvcFJvbGVJRHMiOls3MCw3MCw3MF0sInNob3BSb2xlTGlzdCI6W3siSUQiOjcwLCJUaXRsZSI6IueuoeeQhuWRmCJ9XSwic2hvcFRpdGxlIjoi6L-F5b635a6i5pyNIn0.QfcWmQD2btrAZygVgkNbaV4naDkLLhkqaNOZB3FsINo",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0"
}
@@ -633,6 +1355,8 @@ def data_ch_cate():
return "1"
# update xd_cate set is_set = 0 where id not in (select xd_cate_id from `xd_cate_info` group by xd_cate_id)
# update xd_cate set is_set = 0 where id not in (select cate_id from `xd_cate_treatment` group by cate_id)
@bp.route('/rep_xd_data1')
def rep_xd_data1():
category3 = dbconn.query(
@@ -644,8 +1368,8 @@ def rep_xd_data1():
url = 'https://his.betacat.co/api/pet/v1/config/preDiagnosis/'+str(val3['old_id'])
headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNDc3MWMyMTAtMTMyNS0xMWVmLWE5ZDMtMmY3OGZkMDZmNWU0IiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNiAwOTo0MDo1MyIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE1IDA5OjQwOjUzIiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.CC41-LtPpJyALM2JkXLGr-lgBxr-zH2IDA0bxXQ4qZo",
"Cookie": "Hm_lvt_58a76bea9bf2c966c440612843fe56ef=1713406021,1715823556;status=false;loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNDc3MWMyMTAtMTMyNS0xMWVmLWE5ZDMtMmY3OGZkMDZmNWU0IiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNiAwOTo0MDo1MyIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE1IDA5OjQwOjUzIiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.CC41-LtPpJyALM2JkXLGr-lgBxr-zH2IDA0bxXQ4qZo;Hm_lpvt_58a76bea9bf2c966c440612843fe56ef=1715836124",
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiMTI4Mzg4ZTAtZjVjNC0xMWVmLTkzYzctODc1MWVmMDg4YWU0IiwiY3JlYXRlZEF0IjoiMjAyNS0wMi0yOCAxOTowNjo0OCIsImVtcGxveWVlTm8iOiIyMzA2MzE2NTUxNzQ4MzYyMjQiLCJleHBpcmVkQXQiOiIyMDI1LTAzLTMwIDE5OjA2OjQ4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5byg5ZCJ5pyLIiwicGVybWlzc2lvbkxpc3QiOltdLCJwaG9uZSI6IjE3NzgzMzE2MTI3Iiwic2hvcE5vIjoiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIiwic2hvcE5vcyI6WyIyMDIzMDIwNjE1MTkwNjE2NDAwMTAwMDEiXSwic2hvcFJvbGVJRHMiOls3MCw3MCw3MF0sInNob3BSb2xlTGlzdCI6W3siSUQiOjcwLCJUaXRsZSI6IueuoeeQhuWRmCJ9XSwic2hvcFRpdGxlIjoi6L-F5b635a6i5pyNIn0.QfcWmQD2btrAZygVgkNbaV4naDkLLhkqaNOZB3FsINo",
"Cookie": "loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiMTI4Mzg4ZTAtZjVjNC0xMWVmLTkzYzctODc1MWVmMDg4YWU0IiwiY3JlYXRlZEF0IjoiMjAyNS0wMi0yOCAxOTowNjo0OCIsImVtcGxveWVlTm8iOiIyMzA2MzE2NTUxNzQ4MzYyMjQiLCJleHBpcmVkQXQiOiIyMDI1LTAzLTMwIDE5OjA2OjQ4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5byg5ZCJ5pyLIiwicGVybWlzc2lvbkxpc3QiOltdLCJwaG9uZSI6IjE3NzgzMzE2MTI3Iiwic2hvcE5vIjoiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIiwic2hvcE5vcyI6WyIyMDIzMDIwNjE1MTkwNjE2NDAwMTAwMDEiXSwic2hvcFJvbGVJRHMiOls3MCw3MCw3MF0sInNob3BSb2xlTGlzdCI6W3siSUQiOjcwLCJUaXRsZSI6IueuoeeQhuWRmCJ9XSwic2hvcFRpdGxlIjoi6L-F5b635a6i5pyNIn0.QfcWmQD2btrAZygVgkNbaV4naDkLLhkqaNOZB3FsINo",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0"
}
@@ -955,6 +1679,158 @@ def data_ch2():
return '1'
@bp.route('/data_ch_res')
def data_ch_res():
messages = []
i = 0
size = 500
while i <= 30:
page = i * size
#' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
sql_str = 'SELECT id, species, breed, chief_complaint, phys_examination, suspected_disease, treatment, operation_record, doctors_advice ' \
' FROM ch_data2 ' \
' WHERE chief_complaint != "" and suspected_disease != "" and treatment != "" and id > 124' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn.query(
sql=sql_str
)
if len(rows) == 0:
break
i += 1
for v in rows:
chief_complaint = v['chief_complaint'].strip(',').strip('。')
phys_examination = ''
try:
phys_examinations = json.loads(v['phys_examination'])
for v2 in phys_examinations:
# Skip null entries before treating the value as a dict (the original None check was unreachable).
if phys_examinations[v2] is None:
continue
if type(phys_examinations[v2]) == dict:
if phys_examinations[v2]['Description'] != '':
phys_examination += phys_examinations[v2]['Description']+','
phys_examination = phys_examination.strip(',')
except Exception as e:
print(v['id'])
# instruction = '现在你是一名宠物医生,请根据病情描述给出诊断及治疗方案'
# if v['breed'] != '':
# instruction += v['breed']+'。'
# instruction += replace_self(chief_complaint)
# if phys_examination != '':
# instruction += phys_examination+'。'
chief_complaint = replace_self(chief_complaint)
suspected_diseases = replace_self(v['suspected_disease'])
# if len(suspected_diseases) <= 1:
# suspected_diseases = v['suspected_disease'].split(',')
# suspected_disease = ','.join(suspected_diseases)
# output = suspected_disease+'。'+v['treatment']+'。'
# if v['doctors_advice'] != '':
# doctors_advice = v['doctors_advice']
# output = replace_self(output)
data1 = {
'species': v['species'],
'breed': v['breed'],
'chief_complaint': chief_complaint,
'phys_examination': phys_examination,
'suspected_disease': suspected_diseases,
'treatment': v['treatment'],
'doctors_advice': v['doctors_advice'],
'old_id':v['id']
}
# messages.append(data1)
dbconn.insert(table='ch_data_res', data=data1)
# with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
# for message in messages:
# file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/data_ch_res2')
def data_ch_res2():
i = 0
size = 500
while i <= 300:
page = i * size
sql_str = 'SELECT id, species, breed, chief_complaint, phys_examination, suspected_disease, treatment, operation_record, doctors_advice, diagnosis_summary ' \
' FROM ch_data4 ' \
' WHERE id > 150500' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn.query(
sql=sql_str
)
if len(rows) == 0:
break
i += 1
for v in rows:
chief_complaint = v['chief_complaint'].strip(',').strip('。')
phys_examination = ''
try:
phys_examinations = json.loads(v['phys_examination'])
for v2 in phys_examinations:
# Skip null entries before treating the value as a dict (same fix as in data_ch_res).
if phys_examinations[v2] is None:
continue
if type(phys_examinations[v2]) == dict:
if phys_examinations[v2]['Description'] != '':
phys_examination += phys_examinations[v2]['Description']+','
phys_examination = phys_examination.strip(',')
except Exception as e:
print(v['id'])
chief_complaint = replace_self(chief_complaint)
suspected_diseases = replace_self(v['suspected_disease'])
data1 = {
'species': v['species'],
'breed': v['breed'],
'chief_complaint': chief_complaint,
'phys_examination': phys_examination,
'suspected_disease': suspected_diseases,
'treatment': v['treatment'],
'doctors_advice': v['doctors_advice'],
'old_id':v['id']
}
if data1['suspected_disease'] == '':
data1['suspected_disease'] = v['diagnosis_summary']
dbconn.insert(table='ch_data_res2', data=data1)
return '1'
@bp.route('/data_ch')
def data_ch():
old_id = '1'
......
import re
from flask import Blueprint, request, json
from app.model.base import dbconn, graph_driver
from app.utils.alioss import AliyunOss
import time
import random
# import re
# from flask import Blueprint, request, json
# from app.model.base import dbconn, graph_driver
# from app.utils.alioss import AliyunOss
# import time
# import random
bp = Blueprint('search', __name__, url_prefix='/search')
# bp = Blueprint('search', __name__, url_prefix='/search')
keyword = []
keyword_clinical = {}
# keyword = []
# keyword_clinical = {}
sql_str = 'SELECT clinical, keyword FROM keyword_clinical'
# sql_str = 'SELECT clinical, keyword FROM keyword_clinical'
rows = dbconn.query(
sql=sql_str
)
# rows = dbconn.query(
# sql=sql_str
# )
for val_keyword in rows:
keyword.append(val_keyword['keyword'])
keyword_clinical[val_keyword['keyword']] = val_keyword['clinical']
# for val_keyword in rows:
# keyword.append(val_keyword['keyword'])
# keyword_clinical[val_keyword['keyword']] = val_keyword['clinical']
@bp.route('/upload', methods=['POST', 'GET'])
def upload():
filename = "seg"+time.strftime("%Y%m%d%H%M%S", time.localtime())+str(random.randint(1000, 9999))+".png"
file = "/Users/haoyanbin/Desktop/WechatIMG24.jpeg"
img_url = AliyunOss().put_object_from_file(filename, file)
# @bp.route('/upload', methods=['POST', 'GET'])
# def upload():
# filename = "seg"+time.strftime("%Y%m%d%H%M%S", time.localtime())+str(random.randint(1000, 9999))+".png"
# file = "/Users/haoyanbin/Desktop/WechatIMG24.jpeg"
# img_url = AliyunOss().put_object_from_file(filename, file)
print(img_url)
return json.dumps(img_url)
# print(img_url)
# return json.dumps(img_url)
@bp.route('/illness_search', methods=['POST', 'GET'])
def illness_search():
print(111)
if request.method == 'POST':
req = request.get_json()
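# Build one regex alternation from all known keywords, find every hit in the search text,
# and translate each hit to its clinical term via the keyword_clinical map loaded at import time.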
vocab_pattern = '|'.join(keyword)
vocab_data = re.findall(vocab_pattern, req['search'])
# search_data = search_keyword(vocab_data)
search_data = []
for val1 in vocab_data:
search_data.append(keyword_clinical[val1])
# # strData = jieba.cut(data['search_str'], cut_all=False)
# # graph_sql = "match (n:`疾病(三)`) "
# #
# # for search_str in search_data:
# # if search_str == '':
# # continue
# # graph_sql += " with n match (n:`疾病(三)`)-[*1]->(a) where a.name='" + search_str + "' "
# #
# # graph_sql += " return n"
# #
# # graph_data = graph.run(graph_sql).data()
if len(search_data) == 0:
return ''
# @bp.route('/illness_search', methods=['POST', 'GET'])
# def illness_search():
# print(111)
# if request.method == 'POST':
# req = request.get_json()
# vocab_pattern = '|'.join(keyword)
# vocab_data = re.findall(vocab_pattern, req['search'])
# # search_data = search_keyword(vocab_data)
# search_data = []
# for val1 in vocab_data:
# search_data.append(keyword_clinical[val1])
# # # strData = jieba.cut(data['search_str'], cut_all=False)
# # # graph_sql = "match (n:`疾病(三)`) "
# # #
# # # for search_str in search_data:
# # # if search_str == '':
# # # continue
# # # graph_sql += " with n match (n:`疾病(三)`)-[*1]->(a) where a.name='" + search_str + "' "
# # #
# # # graph_sql += " return n"
# # #
# # # graph_data = graph.run(graph_sql).data()
# if len(search_data) == 0:
# return ''
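# For every matched clinical term, pull candidate diseases from the graph and count how many terms
# each disease matches; the count drives the ranking at the end of this route.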
illness_data = {}
for search_str in search_data:
if search_str == '':
continue
graph_sql = "match (n:`疾病`)-[*1]->(a:`病症`) where a.name='" + search_str + "' return n"
records, _, _ = graph_driver.query(graph_sql)
for val2 in records:
if val2['n']['name'] in illness_data.keys():
illness_data[val2['n']['name']]['num'] += 1
else:
illness_data[val2['n']['name']] = {
'num': 1,
'name': val2['n']['name'],
'examintions':val2['n']['examintions'],
'id':val2['n']['id'],
'description':val2['n']['description'],
'prognosis':val2['n']['prognosis'],
'remarks':val2['n']['remarks']
}
if len(illness_data) == 0:
return ''
result = []
list_illness_data = list(illness_data)
# illness_data = {}
# for search_str in search_data:
# if search_str == '':
# continue
# graph_sql = "match (n:`疾病`)-[*1]->(a:`病症`) where a.name='" + search_str + "' return n"
# records, _, _ = graph_driver.query(graph_sql)
# for val2 in records:
# if val2['n']['name'] in illness_data.keys():
# illness_data[val2['n']['name']]['num'] += 1
# else:
# illness_data[val2['n']['name']] = {
# 'num': 1,
# 'name': val2['n']['name'],
# 'examintions':val2['n']['examintions'],
# 'id':val2['n']['id'],
# 'description':val2['n']['description'],
# 'prognosis':val2['n']['prognosis'],
# 'remarks':val2['n']['remarks']
# }
# if len(illness_data) == 0:
# return ''
# result = []
# list_illness_data = list(illness_data)
for key1 in list_illness_data:
# for key1 in list_illness_data:
graph_sql2 = "match (a:`疾病`)-[]->(m:`治疗方案`)-[]->(n:`药品`) where a.name='" + key1 + "' return m,n"
# graph_sql2 = "match (a:`疾病`)-[]->(m:`治疗方案`)-[]->(n:`药品`) where a.name='" + key1 + "' return m,n"
records2, _, _ = graph_driver.query(graph_sql2)
# records2, _, _ = graph_driver.query(graph_sql2)
medicinal = {}
for val2 in records2:
if val2['m']['name'] in medicinal.keys():
medicinal[val2['m']['name']].append(val2['n']['name'])
else:
medicinal[val2['m']['name']] = [val2['n']['name']]
# medicinal = {}
# for val2 in records2:
# if val2['m']['name'] in medicinal.keys():
# medicinal[val2['m']['name']].append(val2['n']['name'])
# else:
# medicinal[val2['m']['name']] = [val2['n']['name']]
illness_data[key1]['treatment'] = medicinal
# illness_data[key1]['treatment'] = medicinal
result.append(illness_data[key1])
# result.append(illness_data[key1])
result_data = sorted(result, key=lambda x: x['num'],reverse=True)
# result_data = sorted(result, key=lambda x: x['num'],reverse=True)
return json.dumps(result_data)
return "1"
def testaaa():
# illnessDict = {} # diseases
# symptomsDict = {} # symptoms
# shipsData = {} # (illness, relation, symptom) triples
# graph_sql = "match (n:`疾病(三)`)-[*1]->(a:`病症`) where a.name='" + search_str + "' return n"
# graph_data = graph.run(graph_sql).data()
# while graph_data.forward():
# for relation in graph_data.current['p']:
# print(list(relation.types()))
# illness_node = relation.start_node
# symptoms_node = relation.end_node
# relation_type = list(relation.types())[0]
# illness_node_label = str(illness_node.labels).strip(":")
# symptoms_node_label = str(symptoms_node.labels).strip(":")
# # store the triple relations
# if illness_node['id'] in shipsData.keys():
# target_dict = shipsData.get(illness_node['id'])
# target_dict.setdefault(symptoms_node['id'], relation_type)
# else:
# target_dict = {symptoms_node['id']: relation_type}
# shipsData[illness_node['id']] = target_dict
# # store the nodes
# if ("`疾病(三)`" == illness_node_label) and (illness_node['id'] not in illnessDict.keys()):
# illnessDict[illness_node['id']] = illness_node['name']
# if ("病症" == symptoms_node_label) and (symptoms_node['id'] not in symptomsDict.keys()):
# symptomsDict[symptoms_node['id']] = symptoms_node['name']
# json_list = []
# for illness_key, value in shipsData.items():
# for symptoms_key, rel_type in value.items():
# result_dict = {
# 'illness': illnessDict[illness_key],
# 'rel_type': rel_type,
# 'symptoms': symptomsDict[symptoms_key]
# }
# json_list.append(result_dict)
# print(len(json_list))
# for i in range(len(json_list)):
# print("{}-[{}]->{}".format(json_list[i]['illness'], json_list[i]['rel_type'], json_list[i]['symptoms']))
# return json_list
# data_dict = set()
# nodes = []
# links = []
# for i in range(len(json_list)):
# if json_list[i]['illness'] not in data_dict:
# data_dict.add(json_list[i]['illness'])
# nodes.append({'name': json_list[i]['illness'], "symbolSize": 10, 'draggable': 'true'})
# if json_list[i]['symptoms'] not in data_dict:
# data_dict.add(json_list[i]['symptoms'])
# nodes.append({'name': json_list[i]['symptoms'], "symbolSize": 10, 'draggable': 'true'})
# links.append({'source': json_list[i]['illness'], 'target': json_list[i]['symptoms']})
# pg_graph = pg(init_opts=opts.InitOpts(width='1000px', height='800px'))
# pg_graph.add("", nodes, links, repulsion=8000, edge_symbol=['', 'arrow'])
# pg_graph.set_global_opts(title_opts=opts.TitleOpts(title="病症对应的疾病"))
# print('/'.join(strData)+"")
# sql='SELECT d.id, d.drug_name,a.res FROM drug as d left join api as a on d.drug_name = a.param where d.id > 943'
# drug = mysqlconn.query(
# sql='SELECT d.id, d.disease_name FROM disease as d where d.id > 0'
# )
# print(drug)
# return render_template('index.html')
# return pg_graph.render_notebook()
# return json.dumps(list(strData))
return "1"
\ No newline at end of file
# return json.dumps(result_data)
# return "1"
# def testaaa():
# # illnessDict = {} # diseases
# # symptomsDict = {} # symptoms
# # shipsData = {} # (illness, relation, symptom) triples
# # graph_sql = "match (n:`疾病(三)`)-[*1]->(a:`病症`) where a.name='" + search_str + "' return n"
# # graph_data = graph.run(graph_sql).data()
# # while graph_data.forward():
# # for relation in graph_data.current['p']:
# # print(list(relation.types()))
# # illness_node = relation.start_node
# # symptoms_node = relation.end_node
# # relation_type = list(relation.types())[0]
# # illness_node_label = str(illness_node.labels).strip(":")
# # symptoms_node_label = str(symptoms_node.labels).strip(":")
# # # store the triple relations
# # if illness_node['id'] in shipsData.keys():
# # target_dict = shipsData.get(illness_node['id'])
# # target_dict.setdefault(symptoms_node['id'], relation_type)
# # else:
# # target_dict = {symptoms_node['id']: relation_type}
# # shipsData[illness_node['id']] = target_dict
# # # store the nodes
# # if ("`疾病(三)`" == illness_node_label) and (illness_node['id'] not in illnessDict.keys()):
# # illnessDict[illness_node['id']] = illness_node['name']
# # if ("病症" == symptoms_node_label) and (symptoms_node['id'] not in symptomsDict.keys()):
# # symptomsDict[symptoms_node['id']] = symptoms_node['name']
# # json_list = []
# # for illness_key, value in shipsData.items():
# # for symptoms_key, rel_type in value.items():
# # result_dict = {
# # 'illness': illnessDict[illness_key],
# # 'rel_type': rel_type,
# # 'symptoms': symptomsDict[symptoms_key]
# # }
# # json_list.append(result_dict)
# # print(len(json_list))
# # for i in range(len(json_list)):
# # print("{}-[{}]->{}".format(json_list[i]['illness'], json_list[i]['rel_type'], json_list[i]['symptoms']))
# # return json_list
# # data_dict = set()
# # nodes = []
# # links = []
# # for i in range(len(json_list)):
# # if json_list[i]['illness'] not in data_dict:
# # data_dict.add(json_list[i]['illness'])
# # nodes.append({'name': json_list[i]['illness'], "symbolSize": 10, 'draggable': 'true'})
# # if json_list[i]['symptoms'] not in data_dict:
# # data_dict.add(json_list[i]['symptoms'])
# # nodes.append({'name': json_list[i]['symptoms'], "symbolSize": 10, 'draggable': 'true'})
# # links.append({'source': json_list[i]['illness'], 'target': json_list[i]['symptoms']})
# # pg_graph = pg(init_opts=opts.InitOpts(width='1000px', height='800px'))
# # pg_graph.add("", nodes, links, repulsion=8000, edge_symbol=['', 'arrow'])
# # pg_graph.set_global_opts(title_opts=opts.TitleOpts(title="病症对应的疾病"))
# # print('/'.join(strData)+"")
# # sql='SELECT d.id, d.drug_name,a.res FROM drug as d left join api as a on d.drug_name = a.param where d.id > 943'
# # drug = mysqlconn.query(
# # sql='SELECT d.id, d.disease_name FROM disease as d where d.id > 0'
# # )
# # print(drug)
# # return render_template('index.html')
# # return pg_graph.render_notebook()
# # return json.dumps(list(strData))
# return "1"
\ No newline at end of file
@@ -10,24 +10,40 @@ dbconn = MYSQL(
dbcharset='utf8'
)
# dbconn2 = MYSQL(
# dbhost='rm-2zenl1z0v6209a4jrbo.mysql.rds.aliyuncs.com',
# dbport=3306,
# dbuser='root_medical',
# dbpwd='dbc_medical888888!',
# dbname='dbc_medical_record',
# dbcharset='utf8'
# )
dbconn2 = MYSQL(
dbhost='rm-2zenl1z0v6209a4jrbo.mysql.rds.aliyuncs.com',
dbport=3306,
dbuser='root_medical',
dbpwd='dbc_medical888888!',
dbname='dbc_medical_record',
dbhost='39.96.85.45',
dbport=3307,
dbuser='root',
dbpwd='abc123456',
dbname='Illness',
dbcharset='utf8'
)
# dbconn3 = MYSQL(
# dbhost='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
# dbport=3306,
# dbuser='dbc_saas',
# dbpwd='dbc_saas888888',
# dbname='medical_platform',
# dbcharset='utf8'
# )
dbconn3 = MYSQL(
dbhost='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
dbport=3306,
dbuser='dbc_saas',
dbpwd='dbc_saas888888',
dbname='medical_platform',
dbhost='39.96.85.45',
dbport=3307,
dbuser='root',
dbpwd='abc123456',
dbname='Illness',
dbcharset='utf8'
)
# graph = Graph("", auth=("neo4j", "11111111"))
# graph = Graph("bolt://gds-2zeyv40mi12s6mjc149870pub.graphdb.rds.aliyuncs.com:3734", auth=("dbc_tair", "dbc_tair888888"))
# print(graph)
......
@@ -4,4 +4,4 @@ app = create_app()
if __name__ == '__main__':
# app.run(host='0.0.0.0', port=5080, debug=True)
app.run(port=5080, debug=True)
app.run(port=8000, debug=True)