Commit b1c6043f authored by haoyanbin

1

parent ea514cf8
@@ -5,13 +5,757 @@ import re
import time
from flask import Blueprint, json
import requests
from app.model.base import dbconn, dbconn2, dbconn3
from bs4 import BeautifulSoup

bp = Blueprint('importdata', __name__, url_prefix='/importdata')
@bp.route('/data_getimage')
def data_getimage():
i = 0
size = 100
f = open('test.jsonl', 'a', encoding='UTF-8')
while i <= 500:
page = i * size
i += 1
sql_str = 'select `study_instance_uid`, `describe`, `diagnosis`, `images_png`, `images_jpeg` from vpet_data ' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn.query(
sql=sql_str
)
if len(rows) == 0:
break
for val in rows:
data1 = {
'study_instance_uid': val['study_instance_uid'],
'describe': val['describe'].strip(' '),
'diagnosis': val['diagnosis'].strip(' '),
'images_png': val['images_png'],
'images_jpeg': val['images_jpeg']
}
f.write('\n'+json.dumps(data1))
f.close()
return '1'
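# Each line written to test.jsonl above is a single JSON object; a hypothetical
# example record (field values invented for illustration):
# {"study_instance_uid": "1.2.840...", "describe": "...", "diagnosis": "...",
#  "images_png": "a.png,b.png", "images_jpeg": "a.jpg,b.jpg"}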
@bp.route('/data_vpet')
def data_vpet():
i = 0
size = 100
while i <= 100:
page = i * size
i += 1
sql_str = 'select di.id, di.study_instance_uid, dir.describe, dir.diagnosis, di.chief_complaint,'\
' di.pet_species, di.pet_breed, di.pet_sex, di.pet_sterilization from medical_platform.dcm_interpretation as di ' \
' left join dcm_interpretation_read as dir on di.id = dir.interpretation_id ' \
' where di.created_at < "2025-01-01 00:00:00" and di.id > 40773 and di.device_name = "DR" and di.status = 4 and di.study_instance_uid != ""' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn3.query(
sql=sql_str
)
if len(rows) == 0:
break
for val in rows:
study_instance_uid = val['study_instance_uid']
sql_str2 = 'select di.id, di.png, di.jpeg, di.boost_png, di.boost_jpeg, di.is_boost from hos_database.de_instances as di ' \
' left join hos_database.de_series as dse on dse.id = di.de_series_id ' \
' left join hos_database.de_study as ds on ds.id = dse.de_study_id ' \
' where ds.study_instance_uid = "'+study_instance_uid+'"'
rows2 = dbconn3.query(
sql=sql_str2
)
images_png = ""
images_jpeg = ""
for v in rows2:
jpeg = ''
png = ''
if v['is_boost'] == 1:
jpeg = v['boost_jpeg']
png = v['boost_png']
else:
jpeg = v['jpeg']
png = v['png']
images_png += png + ","
images_jpeg += jpeg + ","
images_png = images_png.strip(',')
images_jpeg = images_jpeg.strip(',')
data1 = {
'study_instance_uid': val['study_instance_uid'],
'describe': val['describe'],
'diagnosis': val['diagnosis'],
'images_png': images_png,
'images_jpeg': images_jpeg,
'chief_complaint': val['chief_complaint'],
'pet_species': val['pet_species'],
'pet_breed': val['pet_breed'],
'pet_sex': val['pet_sex'],
'pet_sterilization': val['pet_sterilization'],
'business_id': val['id'],
}
list_id = dbconn.insert(table='vpet_data', data=data1)
return '1'
@bp.route('/vetlas_ins')
def vetlas_ins():
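    # Flow of this route: replay three local JSON exports into MySQL.
    # vetlas_list.json fills vetlas_list; vetlas_info.json fills vetlas_info
    # and vetlas_info_marks (downloading each mark image to save_dir); and
    # vetlas_filter.json fills vetlas_filter and vetlas_filter_info. The
    # commented requests.get() calls below are the original remote source of
    # the same data.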
save_dir = "/disk0/data/images/disease/"
with open('/Users/haoyanbin/Desktop/vetlas_list.json', 'r') as file:
result = file.read()
rows = json.loads(result)
# url = 'http://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/collect/list'
# result = requests.get(url=url)
# rows = json.loads(result.text)
for row in rows:
imagesZipFileId = ''
if str(row['imagesZipFileId']) != 'None':
imagesZipFileId = str(row['imagesZipFileId'])
imagesNumber = ''
if str(row['imagesNumber']) != 'None':
imagesNumber = str(row['imagesNumber'])
loading = ''
if str(row['loading']) != 'None':
loading = str(row['loading'])
failReason = ''
if str(row['failReason']) != 'None':
failReason = str(row['failReason'])
data1 = {
'collectId': row['id'],
'collectName': row['collectName'],
'imagesZipFileId': imagesZipFileId,
'imagesNumber': imagesNumber,
'meta': json.dumps(row['meta']),
'thumbnail': row['thumbnail'],
'tags': json.dumps(row['tags']),
'loading': loading,
'failReason': failReason,
'collect_date': row['createdAt'],
}
list_id = dbconn.insert(table='vetlas_list', data=data1)
with open('/Users/haoyanbin/Desktop/vetlas_info.json', 'r') as file:
result2 = file.read()
rows2 = json.loads(result2)
# url2 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/filter/list?collectName=Xray_LungPattern_v8.3&id='+row['id']
# result2 = requests.get(url=url2)
# rows2 = json.loads(result2.text)
for row2 in rows2:
free = ''
if str(row2['free']) != 'None':
free = str(row2['free'])
cover = ''
if str(row2['cover']) != 'None':
cover = str(row2['cover'])
data2 = {
'list_id': list_id,
'collectId': row['id'],
'collectName': row2['collectName'],
'file_id': json.dumps(row2['file_id']),
'file_url': json.dumps(row2['file_url']),
'tupuName': row2['tupuName'],
'index': row2['index'],
'free': free,
'cover': cover,
}
info_id = dbconn.insert(table='vetlas_info', data=data2)
ins_data21 = []
for row21 in row2['marks']:
save_path = save_dir + row['id'] + "/"
filename = row21['file_url'].split("/")[-1]
download_image(row21['file_url'], save_path, filename)
data21 = {
'info_id': info_id,
'RGB': json.dumps(row21['RGB']),
'point': json.dumps(row21['point']),
'system': row21['system'],
'yIndex': row21['yIndex'],
'file_id': row21['file_id'],
'file_url': row21['file_url'],
'name_en': row21['name_en'],
'name_zh': row21['name_zh'],
'markName': row21['markName'],
'position': row21['position'],
'tupuName': row21['tupuName'],
'description': row21['description'],
'old_id': row21['ID'],
'image_path': save_path + filename
}
ins_data21.append(data21)
dbconn.insert_all(table='vetlas_info_marks', data=ins_data21)
with open('/Users/haoyanbin/Desktop/vetlas_filter.json', 'r') as file:
result3 = file.read()
rows3 = json.loads(result3)
# url3 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/tupu/json_all?collectName=Xray_LungPattern_v8.3&collectId='+row['id']
# result3 = requests.get(url=url3)
# rows3 = json.loads(result3.text)
for row3 in rows3['filter']:
data3 = {
'list_id': list_id,
'name': row3['name'],
'system': row3['system'],
'default_show': row3['default_show'],
'old_id': rows3['id'],
}
filter_id = dbconn.insert(table='vetlas_filter', data=data3)
ins_data3 = []
for row31 in row3['marks']:
data31 = {
'filter_id': filter_id,
'RGB': json.dumps(row31['RGB']),
'name': row31['name'],
'name_en': row31['name_en'],
'name_zh': row31['name_zh'],
'description': row31['description'],
'old_id': row31['ID'],
'mark': 1,
}
ins_data3.append(data31)
# dbconn.insert_all(table='vetlas_filter_info', data=ins_data31)
# ins_data32 = []
for row32 in row3['second']:
data32 = {
'filter_id': filter_id,
'RGB': json.dumps(row32['RGB']),
'name': row32['name'],
'name_en': row32['name_en'],
'name_zh': row32['name_zh'],
'description': row32['description'],
'old_id': row32['ID'],
'mark': 2,
}
ins_data3.append(data32)
dbconn.insert_all(table='vetlas_filter_info', data=ins_data3)
return '1'
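# data_aaa below chains three paged queries: approved study UIDs, then their
# symptom-level instance ids, then lung ("肺") marks joined to image paths;
# it only prints one line per mark and writes nothing back.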
@bp.route('/data_aaa')
def data_aaa():
i = 0
size = 100
while i <= 20:
page = i * size
i += 1
sql_str = 'select study_instance_uid from medical_platform.dcm_image_list_new' \
' where examine_pass_status = 3 and valid_status = 1' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn3.query(
sql=sql_str
)
if len(rows) == 0:
break
study_instance_uid = ''
for v in rows:
study_instance_uid += '"'+v['study_instance_uid']+'",'
study_instance_uid = study_instance_uid.strip(',')
sql_str2 = 'select de_instance_id from medical_platform.dcm_image_list_instance' \
' where instance_type = 4 and examine_pass_status = 3 and valid_status = 1' \
' and study_instance_uid in ('+study_instance_uid+')'
rows2 = dbconn3.query(
sql=sql_str2
)
de_instance_id = ''
for v2 in rows2:
de_instance_id += str(v2['de_instance_id'])+','
de_instance_id = de_instance_id.strip(',')
sql_str3 = 'select m.de_instances_id, m._value, m.de_organ_name, m.de_disease_name, m.de_symptom_name, i.jpeg, i.png, i.is_boost, i.boost_jpeg, i.boost_png' \
' from hos_database.de_instance_new_mark as m' \
' left join hos_database.de_instances as i on m.de_instances_id = i.id ' \
' where m.de_instances_id in ('+de_instance_id+') and m.de_organ_name = "肺"'
rows3 = dbconn3.query(
sql=sql_str3
)
print(len(rows3))
if len(rows3) == 0:
continue
for v3 in rows3:
jpeg = ''
png = ''
if v3['is_boost'] == 1:
jpeg = v3['boost_jpeg']
png = v3['boost_png']
else:
jpeg = v3['jpeg']
png = v3['png']
print(v3['de_instances_id'], v3['de_organ_name'], v3['de_disease_name'], v3['de_symptom_name'], jpeg, png, v3['_value'])
return '1'
@bp.route('/data_med')
def data_med():
i = 0
size = 100
while i <= 75:
page = i * size
sql_str = 'SELECT id, study_instance_uid, device_name, body_part_examined, check_item' \
', pet_species, pet_breed, pet_sex, pet_age_year, pet_age_month, pet_sterilization' \
', chief_complaint, pet_history, clinical, image_description, disease_diagnosis' \
', treatment_plan, imaging_diagnosis, return_remark, quality_type, diagnosis_cate_id, illness_cate_id' \
' FROM dcm_interpretation ' \
' WHERE chief_complaint != "" ' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn3.query(
sql=sql_str
)
i += 1
for v in rows:
sql_str1 = 'SELECT body_part_examined, check_item ' \
' FROM dcm_interpretation_image ' \
' WHERE interpretation_id = ' + str(v['id'])
rows1 = dbconn3.query(
sql=sql_str1
)
body_part_examined = ''
check_item = ''
for v1 in rows1:
body_part_examined += v1['body_part_examined']
check_item += v1['check_item']
diagnosis_cate = ''
if v['diagnosis_cate_id'] > 0:
sql_str2 = 'SELECT cate_name' \
' FROM dcm_category ' \
' WHERE id = ' + str(v['diagnosis_cate_id'])
rows2 = dbconn3.query(
sql=sql_str2
)
diagnosis_cate = rows2[0]['cate_name']
illness_cate = ''
if v['illness_cate_id'] > 0:
sql_str3 = 'SELECT cate_name' \
' FROM dcm_category ' \
' WHERE id = ' + str(v['illness_cate_id'])
rows3 = dbconn3.query(
sql=sql_str3
)
illness_cate = rows3[0]['cate_name']
sql_str4 = 'SELECT `describe`, diagnosis ' \
' FROM dcm_interpretation_read ' \
' WHERE interpretation_id = ' + str(v['id']) + ' order by id desc'
rows4 = dbconn3.query(
sql=sql_str4
)
describe = ''
diagnosis = ''
if len(rows4) > 0 :
describe = rows4[0]['describe']
diagnosis = rows4[0]['diagnosis']
data1 = {
'old_id':v['id'],
'study_instance_uid':v['study_instance_uid'],
'device_name':v['device_name'],
'body_part_examined':body_part_examined,
'check_item':check_item,
'pet_species':v['pet_species'],
'pet_breed':v['pet_breed'],
'pet_sex':v['pet_sex'],
'pet_age_year':v['pet_age_year'],
'pet_age_month':v['pet_age_month'],
'pet_sterilization':v['pet_sterilization'],
'chief_complaint':v['chief_complaint'].strip(',').strip('。'),
'pet_history':v['pet_history'].strip(',').strip('。'),
'clinical':v['clinical'].strip(',').strip('。'),
'image_description':v['image_description'].strip(',').strip('。'),
'disease_diagnosis':v['disease_diagnosis'].strip(',').strip('。'),
'treatment_plan':v['treatment_plan'].strip(',').strip('。'),
'imaging_diagnosis':v['imaging_diagnosis'].strip(',').strip('。'),
'return_remark':v['return_remark'].strip(',').strip('。'),
'quality_type':v['quality_type'],
'diagnosis_cate':diagnosis_cate,
'illness_cate':illness_cate,
'describe':describe,
'diagnosis':diagnosis,
'source':1
}
dbconn.insert(table='med_record', data=data1)
return '1'
@bp.route('/data_cate_goods')
def data_cate_goods():
i = 1
size = 100
while i <= 48:
page = (i-1) * size
sql_str = 'select xc.id, reply from xd_data1 as xd left join xd_cate as xc on xd.old_id = xc.old_id limit ' + str(page) + ', ' + str(size)
dataa = dbconn.query(
sql=sql_str
)
for v in dataa:
rows = json.loads(v['reply'])
if rows['data']['treatment'] != "":
data1 = {
'cate_id': v['id'],
'treatment': rows['data']['treatment']
}
dbconn.insert(table='xd_cate_treatment_msg', data=data1)
if len(rows['data']['commonList']) == 0 :
continue
for val2 in rows['data']['commonList']:
goods_name = ''
for val4 in val2['goodsCenterList']:
goods_name += val4['goodsName'] + ','
goods_name = goods_name.strip(',')
treatment_id = get_xd_treatment(val2['title'],val2['introduce'],goods_name,val2['id'])
data2 = {
'treatment_id': treatment_id,
'cate_id': v['id']
}
dbconn.insert(table='xd_cate_treatment', data=data2)
for val3 in val2['goodsCenterList']:
rows3 = dbconn.fetch_rows(
table='xd_goods',
fields='id',
condition={'goods_no': val3['goodsCenterNo']},
fetchone=True
)
if rows3 is None:
continue
else:
data3 = {
'treatment_id': treatment_id,
'goods_id': rows3['id']
}
dbconn.insert(table='xd_treatment_goods', data=data3)
i+=1
return "1"
def get_xd_treatment(name,introduce,goods_name,old_id):
cond_b = {'name': name,'introduce':introduce,'goods_name':goods_name,'old_id':old_id}
rows = dbconn.fetch_rows(
table='xd_treatment',
fields='id, name',
condition={'old_id': old_id},
fetchone=True
)
if rows is None:
return dbconn.insert(table='xd_treatment', data=cond_b)
else:
return rows['id']
@bp.route('/data_goods')
def data_goods():
i = 1
size = 100
while i <= 24:
page = (i-1) * size
sql_str = 'select reply from xd_data2 limit ' + str(page) + ', ' + str(size)
dataa = dbconn.query(
sql=sql_str
)
for v in dataa:
rows = json.loads(v['reply'])
for val1 in rows['data']['list']:
data2 = {
'name': val1['name'],
'goods_no': val1['goodsNo'],
'brand_name': val1['brandName'],
'specification': val1['specification'],
'category_names': ','.join(val1['categoryNames'])
}
dbconn.insert(table='xd_goods', data=data2)
i+=1
return "1"
@bp.route('/rep_xd_data2')
def rep_xd_data2():
i = 1
while i <= 2333:
url = 'https://his.betacat.co/api/pet/v1/admin/goods/center/list?categoryId=0&pageSize=10&pageIndex='+str(i)+'&categoryName=&searchKeys=&activeStatus=-1'
headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNWRlMDNhMDAtMTM0Mi0xMWVmLWI1NTctYmRhMGJjZmYyMTFjIiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNyAwOTozOTo1OCIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE2IDA5OjM5OjU4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.IXuN9Fmc2tcR0208SvvvMzoLVPOBZ6kPhCdoV0Y_lXg",
"Cookie": "Hm_lvt_58a76bea9bf2c966c440612843fe56ef=1713406021,1715823556; status=false; Hm_lpvt_58a76bea9bf2c966c440612843fe56ef=1715836124; loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNWRlMDNhMDAtMTM0Mi0xMWVmLWI1NTctYmRhMGJjZmYyMTFjIiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNyAwOTozOTo1OCIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE2IDA5OjM5OjU4IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.IXuN9Fmc2tcR0208SvvvMzoLVPOBZ6kPhCdoV0Y_lXg",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0"
}
# params = {"list": [val3['name']]}
result = requests.request('GET', url=url, headers=headers)
# result = requests.request('POST', url=url, headers=headers, json=params)
# rows = json.loads(result.text)
print(i)
data2 = {
'old_id': i,
'req': url,
'reply': result.text
}
dbconn.insert(table='xd_data2', data=data2)
i+=1
return '1'
@bp.route('/data_ch_cate')
def data_ch_cate():
sql_str = 'select keyword from report_keyword where `type`= 5 group by keyword'
rows = dbconn2.query(
sql=sql_str
)
for val1 in rows:
data2 = {
'name': replace_self(val1['keyword'])
}
dbconn.insert(table='ch_cate', data=data2)
return "1"
@bp.route('/rep_xd_data1')
def rep_xd_data1():
category3 = dbconn.query(
sql='SELECT id, `name`, old_id FROM xd_cate where id > 0 and is_set = 0'
)
for val3 in category3:
# time.sleep(random.randint(1, 5))
url = 'https://his.betacat.co/api/pet/v1/config/preDiagnosis/'+str(val3['old_id'])
headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNDc3MWMyMTAtMTMyNS0xMWVmLWE5ZDMtMmY3OGZkMDZmNWU0IiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNiAwOTo0MDo1MyIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE1IDA5OjQwOjUzIiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.CC41-LtPpJyALM2JkXLGr-lgBxr-zH2IDA0bxXQ4qZo",
"Cookie": "Hm_lvt_58a76bea9bf2c966c440612843fe56ef=1713406021,1715823556;status=false;loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiNDc3MWMyMTAtMTMyNS0xMWVmLWE5ZDMtMmY3OGZkMDZmNWU0IiwiY3JlYXRlZEF0IjoiMjAyNC0wNS0xNiAwOTo0MDo1MyIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTA2LTE1IDA5OjQwOjUzIiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI0LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.CC41-LtPpJyALM2JkXLGr-lgBxr-zH2IDA0bxXQ4qZo;Hm_lpvt_58a76bea9bf2c966c440612843fe56ef=1715836124",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0"
}
# params = {"list": [val3['name']]}
result = requests.request('GET', url=url, headers=headers)
# result = requests.request('POST', url=url, headers=headers, json=params)
# rows = json.loads(result.text)
print(url)
cond = {'id': val3['id']}
data_end = {'is_set': 1}
dbconn.update(table='xd_cate', data=data_end, condition=cond)
data2 = {
'id':val3['id'],
'old_id': val3['old_id'],
'req': url,
'reply': result.text
}
dbconn.insert(table='xd_data1', data=data2)
# if len(rows['data']['commonList']) == 0:
# continue
# for row in rows['data']['commonList']:
return '1'
@bp.route('/data_xd_goods_cate')
def data_xd_goods_cate():
json_data = json.load(open("xd_goods_cate.json"))
goods_cate(json_data['data']['list'],0,1)
return "1"
def goods_cate(child, pid, children):
for val in child:
data3 = {
'old_id':val['id'],
'pid': pid,
'name': val['title'],
'children': children
}
cateid = dbconn.insert(table='xd_goods_cate', data=data3)
if len(val['child']) == 0:
continue
else:
goods_cate(val['child'], cateid, children + 1)
@bp.route('/rep_xd_goods_bat')
def rep_xd_goods_bat():
res = dbconn.query(
sql='SELECT xd_cate_id FROM xd_cate_info where id > 11625 group by xd_cate_id'
)
for v in res :
cond = {'id': v['xd_cate_id']}
data_end = {'is_has': 1}
dbconn.update(table='xd_cate', data=data_end, condition=cond)
return "1"
@bp.route('/rep_xd_goods')
def rep_xd_goods():
category3 = dbconn.query(
sql='SELECT id, `name` FROM xd_cate where id > 0 and is_set = 0 and is_has = 0'
)
for val3 in category3:
time.sleep(random.randint(1, 5))
url = 'https://his.betacat.co/api/pet/v1/medical/book/query'
# url = 'https://medical.bk-pet.cn/common/getRedisData'
headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiZjY2Y2VmMjAtN2IwMi0xMWVmLWFkYjAtMWJkYjg2OGRmYWJmIiwiY3JlYXRlZEF0IjoiMjAyNC0wOS0yNSAxNTowMjo0NSIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTEwLTI1IDE1OjAyOjQ1IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.FXlBo6voiTYFFDOOPKRHFa5UUSGq-GtR__Ob6fXZDAo",
"Cookie": "Hm_lvt_58a76bea9bf2c966c440612843fe56ef=1727241030; HMACCOUNT=8764FB1D381CED8F; status=false; loginToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJicm93c2VyS2V5IjoiZjY2Y2VmMjAtN2IwMi0xMWVmLWFkYjAtMWJkYjg2OGRmYWJmIiwiY3JlYXRlZEF0IjoiMjAyNC0wOS0yNSAxNTowMjo0NSIsImVtcGxveWVlTm8iOiIyMzI1NTc5NTIxNzgzMjc1NTIiLCJleHBpcmVkQXQiOiIyMDI0LTEwLTI1IDE1OjAyOjQ1IiwiaG9zcGl0YWxFeHBpcmVkQXQiOiIyMDI1LTA3LTI3IDIzOjU5OjU5IiwiaG9zcGl0YWxObyI6IjIwMjMwNzA0MjE1MjQxMzk5MDAxMDAxMyIsImhvc3BpdGFsVGl0bGUiOiLov4XlvrflrqDnianljLvpmaIiLCJuYW1lIjoi5biC5Zy6LeadjuiSmeiSmSIsInBlcm1pc3Npb25MaXN0IjpbXSwicGhvbmUiOiIxMzc3Njg1MDIwMSIsInNob3BObyI6IjIwMjMwMjA2MTUxOTA2MTY0MDAxMDAwMSIsInNob3BOb3MiOlsiMjAyMzAyMDYxNTE5MDYxNjQwMDEwMDAxIl0sInNob3BSb2xlSURzIjpbNzAsNzBdLCJzaG9wUm9sZUxpc3QiOlt7IklEIjo3MCwiVGl0bGUiOiLnrqHnkIblkZgifV0sInNob3BUaXRsZSI6Iui_heW-t-WuouacjSJ9.FXlBo6voiTYFFDOOPKRHFa5UUSGq-GtR__Ob6fXZDAo; Hm_lpvt_58a76bea9bf2c966c440612843fe56ef=1727247779",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0"
}
params = {"list": [val3['name']]}
result = requests.request('POST', url=url, headers=headers, json=params)
rows = json.loads(result.text)
cond = {'id': val3['id']}
data_end = {'is_set': 1}
dbconn.update(table='xd_cate', data=data_end, condition=cond)
if len(rows['data']['list']) == 0:
continue
for row in rows['data']['list'][0]['titles']:
data2 = {
'xd_cate_id': val3['id'],
'title': row['title'],
'content': row['content']
}
dbconn.insert(table='xd_cate_info', data=data2)
return '1'
@bp.route('/data_xd3')
def data_xd3():
    category3 = dbconn.query(
@@ -85,21 +829,151 @@ def data_symptoms2():
    return "1"
@bp.route('/data_ch4')
def data_ch4():
messages = []
# Read the old JSON file
with open("./alpaca_gpt4_data_zh.json", "r") as file:
data = json.load(file)
for v in data:
instruction = v["instruction"]
input = v["input"]
output = v["output"]
message = {
"instruction": instruction,
"input": input,
"output": output,
}
messages.append(message)
# Write the restructured JSONL file
with open("./gpt4_data_zh", "w", encoding="utf-8") as file:
for message in messages:
file.write(json.dumps(message, ensure_ascii=False) + "\n")
return "1"
@bp.route('/data_ch3')
def data_ch3():
messages = []
# Read the old JSON file
with open("/Users/haoyanbin/Downloads/alpaca_gpt4_data_zh.json", "r") as file:
data = json.load(file)
for v in data:
instruction = v["instruction"]
input = v["input"]
output = v["output"]
message = {
"instruction": instruction,
"input": input,
"output": output,
}
messages.append(message)
# Write the restructured JSONL file
with open("./gpt4_data_zh", "w", encoding="utf-8") as file:
for message in messages:
file.write(json.dumps(message, ensure_ascii=False) + "\n")
return "1"
@bp.route('/data_ch2')
def data_ch2():
messages = []
i = 0
size = 500
while i <= 30:
page = i * size
#' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
sql_str = 'SELECT breed, chief_complaint, phys_examination, suspected_disease, treatment, operation_record, doctors_advice, return_visit ' \
' FROM ch_data2 ' \
' WHERE chief_complaint != "" and suspected_disease != "" and treatment != "" ' \
' limit ' + str(page) + ', ' + str(size)
rows = dbconn.query(
sql=sql_str
)
if len(rows) == 0:
break
i += 1
for v in rows:
chief_complaint = v['chief_complaint'].strip(',').strip('。')
# phys_examinations = json.loads(v['phys_examination'])
# phys_examination = ''
# for v2 in phys_examinations:
# if phys_examinations[v2] is None:
# continue
# if phys_examinations[v2]['Description'] != '':
# phys_examination += phys_examinations[v2]['Description']+','
# phys_examination = phys_examination.strip(',')
instruction = '现在你是一名宠物医生,请根据病情描述给出诊断及治疗方案'
# if v['breed'] != '':
# instruction += v['breed']+'。'
# instruction += replace_self(chief_complaint)
# if phys_examination != '':
# instruction += phys_examination+'。'
input = replace_self(chief_complaint)
suspected_diseases = v['suspected_disease'].split(' ')
if len(suspected_diseases) <= 1:
suspected_diseases = v['suspected_disease'].split(',')
suspected_disease = ','.join(suspected_diseases)
output = suspected_disease+'。'+v['treatment']+'。'
if v['doctors_advice'] != '':
output += v['doctors_advice']
output = replace_self(output)
data1 = {
'instruction': instruction,
'input': input,
'output': output
}
messages.append(data1)
# dbconn.insert(table='llama_data', data=data1)
with open('./med_data2.jsonl', "w", encoding="utf-8") as file:
for message in messages:
file.write(json.dumps(message, ensure_ascii=False) + "\n")
return '1'
@bp.route('/data_ch')
def data_ch():
    old_id = '1'
    i = 0
    size = 500
    while i <= 31:
        page = i * size
        #' left join report_main_status as rms on rm.orgin_id=rms.report_main_orgin_id' \
        # sql_str = 'SELECT rm.id, rm.orgin_id, rp.species, rp.breed, rp.patient_gender_code, rp.weight, rp.is_sterilization, rp.chief_complaint, rp.phys_examination, rp.suspected_disease, rp.treatment, rp.operation_record, rp.doctors_advice, rp.return_visit ' \
        #     ' FROM ch_report_main as rm ' \
        #     ' left join ch_report_patient as rp on rm.orgin_id=rp.report_main_orgin_id ' \
        #     ' where rm.valid_status = 1 and rm.visit_status = 1 and rm.id >' + old_id + \
        #     ' limit ' + str(page) + ', ' + str(size)
        sql_str = 'SELECT rm.id, rm.orgin_id, rp.species, rp.breed, rp.patient_gender_code, rp.weight, rp.is_sterilization, rp.chief_complaint, rp.phys_examination, rp.suspected_disease, rp.treatment, rp.operation_record, rp.doctors_advice, rp.return_visit ' \
                  ' FROM ch_report_main as rm ' \
                  ' left join ch_report_patient as rp on rm.orgin_id=rp.report_main_orgin_id ' \
                  ' where rp.chief_complaint != "" and rp.suspected_disease !="" and rp.treatment !="" and rm.id >' + old_id + \
                  ' limit ' + str(page) + ', ' + str(size)
        rows = dbconn2.query(
@@ -110,49 +984,56 @@ def data_ch():
        for val1 in rows:
            data1 = {
                'orgin_id': val1['orgin_id'],
                'species': val1['species'],
                'breed': val1['breed'],
                'patient_gender_code': val1['patient_gender_code'],
                'weight': val1['weight'],
                'is_sterilization': val1['is_sterilization'],
                'chief_complaint': val1['chief_complaint'],
                'phys_examination': val1['phys_examination'],
                'suspected_disease': val1['suspected_disease'],
                'treatment': val1['treatment'],
                'operation_record': val1['operation_record'],
                'doctors_advice': val1['doctors_advice'],
                'return_visit': val1['return_visit'],
                'old_id': val1['id']
            }
            dbconn.insert(table='ch_data2', data=data1)

            # sql_str2 = "SELECT `type`, keyword FROM report_keyword where report_main_orgin_id = '" + val1[
            #     'orgin_id'] + "'"
            # rows2 = dbconn2.query(
            #     sql=sql_str2
            # )
            # # 1 chief complaint 2 diagnosis report 3 imaging report 4 analysis result 5 illness diagnosis 6 treatment plan 7 doctor's advice
            # for val2 in rows2:
            #     if val2['type'] == 7:
            #         data1['doctors_advice_data'] += val2['keyword'] + ','
            #     if val2['type'] == 5:
            #         data1['illness_data'] += val2['keyword'] + ','
            #     # chief complaint - symptom
            #     if val2['type'] == 1:
            #         data2 = {
            #             'illness_id': val2['id'],
            #             'name': val2['keyword']
            #         }
            #         dbconn.insert(table='ch_symptoms', data=data2)
            #     # chief complaint - symptom
            #     if val2['type'] == 6:
            #         data2 = {
            #             'illness_id': val2['id'],
            #             'name': val2['keyword']
            #         }
            #         dbconn.insert(table='ch_treatment', data=data2)
            # dbconn.insert(table='ch_illness', data=data1)
    return '1'
@@ -201,7 +1082,7 @@ def rep_xd_cate():
@bp.route('/data_xd_cate', methods=['POST', 'GET'])
def data_xd_cate():
    cate_data = json.load(open('catej.json'))
    data_set_cate(cate_data['data']['list'], 0, 1)
@@ -211,12 +1092,13 @@ def data_xd_cate():
def data_set_cate(child, pid, children):
    for val in child:
        data3 = {
            'old_id': val['id'],
            'pid': pid,
            'name': val['title'],
            'children': children
        }
        cateid = dbconn.insert(table='xd_cate', data=data3)
        if len(val['child']) == 0:
            continue
        else:
            data_set_cate(val['child'], cateid, children + 1)
@@ -956,4 +1838,22 @@ def update_pet_species(url):
def replace_self(str):
    return str.replace(" ", "").replace("\t", "").replace("\n", "").replace("\r", "")
def download_image(url, save_path, filename):
    try:
        # Send the HTTP GET request
        response = requests.get(url)
        # Check whether the request succeeded
        if response.status_code == 200:
            os.makedirs(save_path, exist_ok=True)
            # Write the response body to disk in binary mode
            with open(save_path + filename, 'wb') as file:
                file.write(response.content)
            print(f"Image successfully downloaded: {save_path}")
        else:
            print(f"Failed to retrieve image. Status code: {response.status_code}")
    except Exception as e:
        print(f"An error occurred: {e}")
import re
from flask import Blueprint, request, json
from app.model.base import dbconn, graph_driver
from app.utils.alioss import AliyunOss
import time
import random

bp = Blueprint('search', __name__, url_prefix='/search')
@@ -18,6 +21,16 @@ for val_keyword in rows:
keyword_clinical[val_keyword['keyword']] = val_keyword['clinical']
@bp.route('/upload', methods=['POST', 'GET'])
def upload():
filename = "seg"+time.strftime("%Y%m%d%H%M%S", time.localtime())+str(random.randint(1000, 9999))+".png"
file = "/Users/haoyanbin/Desktop/WechatIMG24.jpeg"
img_url = AliyunOss().put_object_from_file(filename, file)
print(img_url)
return json.dumps(img_url)
@bp.route('/illness_search', methods=['POST', 'GET'])
def illness_search():
    print(111)
...
@@ -19,6 +19,14 @@ dbconn2 = MYSQL(
    dbcharset='utf8'
)

dbconn3 = MYSQL(
    dbhost='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
    dbport=3306,
    dbuser='dbc_saas',
    dbpwd='dbc_saas888888',
    dbname='medical_platform',
    dbcharset='utf8'
)

# graph = Graph("", auth=("neo4j", "11111111"))
# graph = Graph("bolt://gds-2zeyv40mi12s6mjc149870pub.graphdb.rds.aliyuncs.com:3734", auth=("dbc_tair", "dbc_tair888888"))
...
import oss2
class AliyunOss(object):
    def __init__(self):
        self.access_key_id = "LTAI5t91PkMfeZSckddWNxiT"  # AccessKey ID from the Aliyun console
        self.access_key_secret = "1MblQ0r6w9LC3tlR5O1zNxnDQKKbjH"  # AccessKey secret from the Aliyun console
        self.auth = oss2.Auth(self.access_key_id, self.access_key_secret)
        self.bucket_name = "dbc-static"  # Name of the bucket created on Aliyun
        self.endpoint = "oss-cn-beijing.aliyuncs.com"  # Endpoint of the bucket, as shown in the Aliyun console
        self.dir = "ai-chest/"  # Directory prefix for uploaded objects
        self.bucket = oss2.Bucket(self.auth, self.endpoint, self.bucket_name)

    # name: the object name to save in the Aliyun bucket
    # file: the path of the local image file
    def put_object_from_file(self, name, file):
        self.bucket.put_object_from_file(self.dir + name, file)
        return "https://{}.{}/{}".format(self.bucket_name, self.endpoint, self.dir + name)
import time
import requests
import re
class GoogleTranslate(object):
def __init__(self, sl='auto', tl='', domainnames=""):
"""
A python wrapped free and unlimited API for Google Translate.
:param sl:from Language
:param tl:to Language
:param domainnames: google domainnames, for example if domainnames="com" ,the url is "translate.google.com". In China the com domainnames is blocked by GFW,you can use "cn".
"""
self.sl = sl
self.tl = tl
self.hl = tl
if domainnames == "":
self.domainnames ="com"
else:
self.domainnames = domainnames
self.TKK = getTKK(domainnames=self.domainnames)
def _returnintorzero(self, d):
try:
temp = int(d)
except Exception as e:
temp = 0
return temp
def _xr(self, a, b):
size_b = len(b)
c = 0
while c < size_b - 2:
d = b[c + 2]
d = ord(d[0]) - 87 if 'a' <= d else int(d)
d = (a % 0x100000000) >> d if '+' == b[c + 1] else a << d
a = a + d & 4294967295 if '+' == b[c] else a ^ d
c += 3
return a
def trans(self, text):
"""
translate text
:param text: The text to be translate
:return:
"""
tk = self._gettk(text)
timeh = int(time.time() / 3600)
if self.TKK.split(".")[0] != timeh:
self.TKK = getTKK(domainnames=self.domainnames)
data = {
"client": 'webapp',
"sl": self.sl,
"tl": self.tl,
"hl": self.hl,
"dt": ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'],
"ie": 'UTF-8',
"oe": 'UTF-8',
"otf": 1,
"ssel": 0,
"tsel": 0,
"kc": 7,
"q": text,
"tk": tk
}
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br"}
url = 'https://translate.google.'+self.domainnames+'/translate_a/single'
jsonres = requests.get(url=url, headers=headers, params=data)
lines = ''
try:
for i in jsonres.json()[0]:
if i:
if i[0]:
lines = lines + i[0]
except Exception as e:
print("失败语句:")
print(text)
print("tk:")
print(e)
print('实际返回信息:')
print(jsonres.text)
raise Exception(text)
return lines
def _gettk(self, a):
d = self.TKK.split(".")
b = int(d[0])
e = []
for g in range(len(a)):
l = ord(a[g])
if 128 > l:
e.append(l)
else:
if 2048 > l:
e.append(l >> 6 | 192)
else:
if (55296 == (l & 64512) and g + 1 < len(a) and 56320 == (ord(a[g + 1]) & 64512)):
l = 65536 + ((l & 1023) << 10) + (a.charCodeAt(++g) & 1023)
e.append(l >> 18 | 240)
e.append(l >> 12 & 63 | 128)
else:
e.append(l >> 12 | 224)
e.append(l >> 6 & 63 | 128)
e.append(l & 63 | 128)
a = b
for f in range(len(e)):
a = a + int(e[f])
a = self._xr(a, "+-a^+6")
a = self._xr(a, "+-3^+b+-f");
a ^= self._returnintorzero(d[1])
if 0 > a:
a = (a & 2147483647) + 2147483648
a %= 1E6
return str(int(a)) + "." + str(int(a) ^ b)
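# How the token scheme fits together: TKK is "hour.seed" scraped from the
# Google Translate index page (getTKK below). _gettk UTF-8-encodes the text,
# folds each byte into the hour value via the "+-a^+6" and "+-3^+b+-f" bit-mix
# rounds in _xr, XORs in the seed, and reduces modulo 1e6; trans() refreshes
# the TKK whenever the hour component goes stale.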
def getTKK(domainnames=""):
if domainnames == "":
url = "https://translate.google.com/"
else:
url = "https://translate.google." + domainnames + "/"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br"}
googleindexpage = requests.get(url, headers=headers).text
tkk = re.findall(r"tkk:'(\d*\.\d*)'", googleindexpage)
if len(tkk) != 0:
print(tkk[0])
return tkk[0]
else:
return None
if __name__ == '__main__':
# pass
# This is an example.
translator = GoogleTranslate(domainnames="cn", tl="zh-CN")
text_origin = "Guía de servicios notariales y consulares en Bolivia. £100"
print(translator.trans(text_origin))
import pydicom.uid
import sys
import pydicom
from pylab import *
import cv2
import json
def contours_in(contours):
p = np.zeros(shape=(2000,2000))
cv2.drawContours(p, contours, -1, 255, -1)
a = np.where(p==255)[0].reshape(-1,1)
b = np.where(p==255)[1].reshape(-1,1)
coordinate = np.concatenate([a,b], axis=1).tolist()
inside = [tuple(x) for x in coordinate]
return inside
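# contours_in rasterizes the contours filled (thickness -1) onto a 2000x2000
# canvas and returns every interior pixel as a (row, col) tuple, i.e. the area
# enclosed by the contours rather than just their outlines.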
if __name__ == '__main__':
part = 'belly'
# image_path = '/Users/haoyanbin/Desktop/0e9df40b-2dd1022a-c9fe09f4-c337c767-34502cca_cropped_image.png'
image_path = '/Users/haoyanbin/Desktop/db0d2877-16a5787c-4c8d67c4-071097fc-deee3b7e.png'
# image = Image.open(image_path)
# pixel_array = np.array(image)
# dcm_path = '/Users/haoyanbin/Desktop/a4f9c3a5-4ba2-4adb-b408-3d3a0df1d912.dcm'
dcm_path = '/Users/haoyanbin/Desktop/response3.dcm'
dicom_image = pydicom.dcmread(dcm_path)
dic_img = dicom_image.pixel_array
img = cv2.bitwise_not(dic_img)
# img = cv2.cvtColor(pixel_array)
# img = cv2.imread(image_path)
height, width = img.shape[:2]
image = np.zeros((height, width, 1), np.uint8)
# image2 = np.zeros((height, width, 3), np.uint8)
# data_json = r'{"visible":true,"active":false,"invalidated":false,"handles":{"points":[{"x":1075.0994182303004,"y":1325.3899033176901,"highlight":true,"active":true,"lines":[{"x":1206.9569189047063,"y":1259.4611529804872}]},{"x":1206.9569189047063,"y":1259.4611529804872,"highlight":true,"active":true,"lines":[{"x":1206.9569189047063,"y":1256.7141217164371}]},{"x":1206.9569189047063,"y":1256.7141217164371,"highlight":true,"active":true,"lines":[{"x":1209.7039501687564,"y":1256.7141217164371}]},{"x":1209.7039501687564,"y":1256.7141217164371,"highlight":true,"active":true,"lines":[{"x":1308.5970756745608,"y":1179.7972463230337}]},{"x":1308.5970756745608,"y":1179.7972463230337,"highlight":true,"active":true,"lines":[{"x":1347.0555133712626,"y":1163.315058738733}]},{"x":1347.0555133712626,"y":1163.315058738733,"highlight":true,"active":true,"lines":[{"x":1341.5614508431622,"y":1017.7224017440765}]},{"x":1341.5614508431622,"y":1017.7224017440765,"highlight":true,"active":true,"lines":[{"x":1336.067388315062,"y":844.6594321089187}]},{"x":1336.067388315062,"y":844.6594321089187,"highlight":true,"active":true,"lines":[{"x":1336.067388315062,"y":732.0311502828636}]},{"x":1336.067388315062,"y":732.0311502828636,"highlight":true,"active":true,"lines":[{"x":1267.3916067138089,"y":721.0430252266631}]},{"x":1267.3916067138089,"y":721.0430252266631,"highlight":true,"active":true,"lines":[{"x":1264.6445754497588,"y":721.0430252266631}]},{"x":1264.6445754497588,"y":721.0430252266631,"highlight":true,"active":true,"lines":[{"x":1264.6445754497588,"y":723.7900564907131}]},{"x":1264.6445754497588,"y":723.7900564907131,"highlight":true,"active":true,"lines":[{"x":1209.7039501687564,"y":803.4539631481668}]},{"x":1209.7039501687564,"y":803.4539631481668,"highlight":true,"active":true,"lines":[{"x":1209.7039501687564,"y":806.2009944122169}]},{"x":1209.7039501687564,"y":806.2009944122169,"highlight":true,"active":true,"lines":[{"x":1206.9569189047063,"y":808.948025676267}]},{"x":1206.9569189047063,"y":808.948025676267,"highlight":true,"active":true,"lines":[{"x":1176.739575000155,"y":902.3470886539712}]},{"x":1176.739575000155,"y":902.3470886539712,"highlight":true,"active":true,"lines":[{"x":1176.739575000155,"y":905.0941199180213}]},{"x":1176.739575000155,"y":905.0941199180213,"highlight":true,"active":true,"lines":[{"x":1176.739575000155,"y":907.8411511820714}]},{"x":1176.739575000155,"y":907.8411511820714,"highlight":true,"active":true,"lines":[{"x":1176.739575000155,"y":910.5881824461217}]},{"x":1176.739575000155,"y":910.5881824461217,"highlight":true,"active":true,"lines":[{"x":1165.7514499439544,"y":1006.734276687876}]},{"x":1165.7514499439544,"y":1006.734276687876,"highlight":true,"active":true,"lines":[{"x":1132.787074775353,"y":1124.856621042031}]},{"x":1132.787074775353,"y":1124.856621042031,"highlight":true,"active":true,"lines":[{"x":1105.3167621348516,"y":1218.2556840197353}]},{"x":1105.3167621348516,"y":1218.2556840197353,"highlight":true,"active":true,"lines":[{"x":1105.3167621348516,"y":1221.0027152837854}]},{"x":1105.3167621348516,"y":1221.0027152837854,"highlight":true,"active":true,"lines":[{"x":1102.5697308708015,"y":1221.0027152837854}]},{"x":1102.5697308708015,"y":1221.0027152837854,"highlight":true,"active":true,"lines":[{"x":1102.5697308708015,"y":1223.7497465478357}]},{"x":1102.5697308708015,"y":1223.7497465478357,"highlight":true,"active":true,"lines":[{"x":1102.5697308708015,"y":1226.4967778118857}]},{"x":1102.5697308708015,"y":1226.4967778118857,"highlight":true,"active":true,"lines":[{
"x":1077.8464494943505,"y":1297.9195906771888}]},{"x":1077.8464494943505,"y":1297.9195906771888,"highlight":true,"active":true,"lines":[{"x":1075.0994182303004,"y":1325.3899033176901,"highlight":true,"active":true,"lines":[{"x":1206.9569189047063,"y":1259.4611529804872}]}]}],"textBox":{"active":false,"hasMoved":true,"movesIndependently":false,"drawnIndependently":true,"allowedOutsideImage":true,"hasBoundingBox":true,"x":739.9616040161853,"y":1207.2675589635348,"boundingBox":{"width":132.6845703125,"height":24,"left":530.1999969482422,"top":87.20000076293945},"moving":false},"invalidHandlePlacement":false},"uuid":"4d21ceae-710e-49ab-90c5-6a97db7870f6","canComplete":false,"highlight":false,"polyBoundingBox":{"left":1075.0994182303004,"top":721.0430252266631,"width":271.95609514096213,"height":604.3468780910271},"area":2611.8905008669453,"info":"{\"_type\":2,\"de_position_id\":{\"label\":\"胸部\",\"value\":4,\"key\":\"0\"},\"de_system_id\":{\"label\":\"肺\",\"value\":27,\"key\":\"5\"},\"de_organ_id\":{\"label\":\"椎隔三角区\",\"value\":209,\"key\":\"0\"},\"de_disease_id\":{\"label\":\"胸腔积液\",\"value\":129,\"key\":\"4\"},\"de_symptom_id\":{\"label\":\"侧位-肺叶边缘回缩\",\"value\":154,\"key\":\"6\"}}"}'
data_json = r'{"body_examined":"head","body_examined_property":0.4,"conclusion":"分割成功","instance_id":"a97bbe0a-92563041-fed5e016-eac0e5f6-0f72d4af","points":{"belly":[[[3057,1347]],[[3020,1330]],[[2734,1275]],[[2549,1264]],[[2433,1291]],[[2305,1352]],[[2241,1426]],[[2206,1686]],[[2141,1772]],[[2063,1802]],[[2032,1859]],[[1970,1917]],[[1895,2034]],[[1822,2198]],[[1798,2275]],[[1782,2389]],[[1800,2501]],[[1826,2583]],[[1851,2615]],[[1902,2646]],[[1968,2662]],[[2237,2664]],[[2401,2627]],[[2769,2510]],[[2981,2457]],[[3057,2421]]],"chest":[[[2267,1332]],[[2250,1325]],[[2080,1346]],[[1613,1437]],[[1424,1490]],[[1345,1487]],[[1232,1515]],[[1034,1538]],[[918,1572]],[[846,1573]],[[752,1611]],[[712,1635]],[[666,1709]],[[626,1949]],[[593,2065]],[[600,2135]],[[652,2182]],[[712,2261]],[[839,2361]],[[1069,2473]],[[1159,2500]],[[1233,2508]],[[1395,2558]],[[1807,2635]],[[1827,2618]],[[1815,2592]],[[1813,2541]],[[1783,2468]],[[1771,2364]],[[1819,2181]],[[1888,2027]],[[1958,1911]],[[2058,1797]],[[2135,1764]],[[2189,1704]],[[2207,1661]],[[2211,1552]],[[2240,1423]],[[2266,1371]]],"forelimb":[[[175,1431]],[[133,1429]],[[110,1442]],[[60,1448]],[[35,1469]],[[23,1506]],[[0,1517]],[[0,2460]],[[10,2462]],[[25,2539]],[[38,2562]],[[82,2566]],[[126,2548]],[[303,2431]],[[439,2421]],[[444,2399]],[[400,2349]],[[382,2306]],[[381,2277]],[[401,2214]],[[392,2128]],[[433,2064]],[[488,2055]],[[503,2036]],[[505,1990]],[[465,1896]],[[449,1777]],[[393,1659]],[[389,1612]],[[431,1589]],[[429,1565]],[[373,1506]]],"head":[],"heart":[[[1052,1945]],[[1028,1996]],[[1010,2072]],[[1021,2138]],[[1106,2326]],[[1202,2447]],[[1256,2481]],[[1376,2531]],[[1508,2565]],[[1673,2582]],[[1718,2563]],[[1734,2540]],[[1736,2461]],[[1710,2237]],[[1609,2014]],[[1587,1946]],[[1548,1897]],[[1469,1846]],[[1280,1803]],[[1215,1821]],[[1160,1872]],[[1105,1901]]],"hindlimb":[],"sternum":[[[1110,2539]],[[1133,2571]],[[1208,2612]],[[1354,2665]],[[1475,2675]],[[1660,2721]],[[1772,2714]],[[1912,2690]],[[1872,2658]],[[1779,2660]],[[1683,2633]],[[1595,2628]],[[1497,2589]],[[1394,2575]],[[1213,2517]],[[1117,2523]]],"vertebra":[[[3057,1122]],[[3019,1101]],[[2918,1107]],[[2756,1081]],[[2691,1082]],[[2613,1051]],[[2534,1047]],[[2447,1073]],[[2310,1079]],[[2233,1116]],[[2184,1120]],[[1976,1187]],[[1820,1219]],[[1715,1225]],[[1656,1247]],[[1576,1255]],[[1499,1276]],[[1401,1278]],[[1164,1309]],[[681,1351]],[[637,1374]],[[608,1374]],[[564,1394]],[[553,1422]],[[579,1469]],[[679,1497]],[[924,1518]],[[1326,1470]],[[1473,1463]],[[2189,1317]],[[2374,1288]],[[2568,1281]],[[2721,1294]],[[3020,1340]],[[3057,1335]]]},"show_img":"https://ai-bianque.oss-cn-beijing.aliyuncs.com/a97bbe0a-92563041-fed5e016-eac0e5f6-0f72d4af_organ_seg.jpg"}'
data = json.loads(data_json)
points = data['points'][part]
i = 0
c = len(points)
area = []
while i < c:
area.append([points[i][0][0],points[i][0][1]])
# area.append([int(points[i][0]),int(points[i][0])])
i+=1
if i == c:
area.append([points[0][0][0],points[0][0][1]])
# pts = np.array(area, np.int32)
# pts = pts.reshape((-1,1,2))
cv2.fillPoly(image, np.array([area]), color=(255, 255, 255))
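# The fillPoly call above rasterizes the selected body-part polygon into a
# binary mask; for part == 'chest' the heart polygon is blacked out below so
# heart pixels are excluded from the gray-value statistics.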
if part == 'chest':
points2 = data['points']['heart']
i = 0
c = len(points2)
area2 = []
while i < c:
area2.append([points2[i][0][0],points2[i][0][1]])
# area.append([int(points[i][0]),int(points[i][0])])
i+=1
if i == c:
area2.append([points2[0][0][0],points2[0][0][1]])
cv2.fillPoly(image, np.array([area2]), color=(0, 0, 0))
# cv2.fillPoly(image, pixel_array, color=(255, 255, 255))
# cv2.imshow('image', image)
# cv2.waitKey()
# cv2.destroyAllWindows()
ret, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
image3 = np.zeros((height, width, 1), np.uint8)
cv2.drawContours(image3, contours, -1, 255, -1)
a = np.where(image3==255)[0].reshape(-1,1)
b = np.where(image3==255)[1].reshape(-1,1)
coordinate = np.concatenate([a,b], axis=1).tolist()
gray_kv = {}
gray_value = []
gray_num = 0
gn = 0
for n in coordinate:
gray_kv[(n[0],n[1])] = img[n[0],n[1]]
gray_value.append(img[n[0],n[1]])
gray_num += img[n[0],n[1]]
gn += 1
max_gray_value = max(gray_value)
min_gray_value = min(gray_value)
for k,v in gray_kv.items():
if v == max_gray_value:
print(k)
if v == min_gray_value:
print(k)
print(max(gray_value))
print(min(gray_value))
print(gray_num)
print(gn)
print(gray_num/gn)
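# gray_num / gn printed above is the mean pixel intensity inside the masked
# region of the inverted DICOM image, a rough tissue-density estimate for the
# selected body part.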
# for n in coordinate:
# image2[n[0],n[1]] = image2[n[0],n[1]] + [255,255,255]
# cv2.imshow('image', image2)
# cv2.waitKey()
# cv2.destroyAllWindows()
# z = []
# for x in range(height):
# for y in range(width):
# if image[x,y] == [255, 255, 255]:
# z.append[x,y]
# Set the color threshold
# lower_green = np.array([254, 254, 254])
# upper_green = np.array([255, 255, 255])
# # Get the points within the given color range
# mask = cv2.inRange(image, lower_green, upper_green)
# # Use cv2.findNonZero() to get the coordinates of the non-zero pixels
# non_zero_points = cv2.findNonZero(mask)
# # Print the number of points
# print(f"Number of green points: {len(non_zero_points)}")
# # To visualize the points if needed:
# # create an image the same size as the original and mark the points on it
# green_points_image = cv2.cvtColor(np.zeros_like(image), cv2.COLOR_GRAY2BGR)
# for point in non_zero_points:
#     x, y = point[0][0], point[0][1]
#     cv2.circle(green_points_image, (x, y), 5, (0, 255, 0), -1)
# # cv2.polylines(image,[pts],True,(255,255,255))
# cv2.imshow('image', green_points_image)
# cv2.waitKey()
# cv2.destroyAllWindows()
# ret, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
# cv2.imshow('image', binary_image)
# cv2.waitKey()
# cv2.destroyAllWindows()
# contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# contour_image = cv2.cvtColor(image2, cv2.COLOR_GRAY2BGR)
# contours_in(contours)
# image3 = np.zeros((height, width, 1), np.uint8)
# cv2.drawContours(image3, contours, -1, 255, -1)
# a = np.where(image3==255)[0].reshape(-1,1)
# b = np.where(image3==255)[1].reshape(-1,1)
# coordinate = np.concatenate([a,b], axis=1).tolist()
# ab = []
# inside = [ for x in coordinate]
# contour_image = cv2.cvtColor(image2, cv2.COLOR_GRAY2BGR)
# cv2.drawContours(contour_image, inside, -1, (255,255,255), 2)
# for n in coordinate:
# image2[n[0],n[1]] = image2[n[0],n[1]] + [255,255,255]
# cv2.imshow('image', image2)
# cv2.waitKey()
# cv2.destroyAllWindows()
# cv2.fillPoly(pixel_array, np.array([area]), color=(255, 255, 255))
# cv2.imwrite(output_path+filename, image)
# lower_threshold = 100  # lower bound threshold
# upper_threshold = 200  # upper bound threshold
# tissue_mask = (pixel_array > lower_threshold) & (pixel_array < upper_threshold)
# tissue_density = np.mean(pixel_array)
# print(tissue_density)
# plt.imshow(pixel_array, cmap='gray')
# # plt.contour(tissue_mask, colors='r')  # overlay the ROI contour on the image
# plt.title(f'Tissue Density: {tissue_density:.2f}')
# plt.show()
@@ -67,6 +67,26 @@ class MYSQL:
            self.connection.commit()
            return last_id

    def insert_all(self, table, data):
        """mysql batch insert() function"""
        with self.connection.cursor() as cursor:
            params = self.join_field_value(data[0])
            sql = "INSERT IGNORE INTO {table} SET {params}".format(
                table=table, params=params)
            ins_data = []
            for v in data:
                ins_data.append(tuple(v.values()))
            cursor.executemany(sql, ins_data)
            # last_id = self.connection.insert_id()
            self.connection.commit()
            return
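    # A hypothetical usage sketch for insert_all: every dict in data must share
    # the key order of data[0], because the statement is built once from the
    # first row and re-executed by executemany for each tuple of values.
    #
    #     dbconn.insert_all(table='vetlas_info_marks',
    #                       data=[{'info_id': 1, 'system': 'lung'},
    #                             {'info_id': 2, 'system': 'heart'}])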
    def delete(self, table, condition=None, limit=None):
        """
        mysql delete() function
@@ -201,7 +221,7 @@ class MYSQL:
            return cursor.fetchone() if fetchone else cursor.fetchall()

    def query(self, sql, fetchone=False, execute=False):
        # print(sql)
        """execute custom sql query"""
        with self.connection.cursor() as cursor:
            cursor.execute(sql)
...
from pymysql import connect, cursors
import os
import requests
import json
from PIL import Image
import cv2
import numpy as np
import xlwt
dbconn = connect(
host='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
port=3306,
user='ai_root',
password='ai_root888',
db='medical_platform',
charset='utf8',
cursorclass=cursors.DictCursor)
# dbconn = connect(
# host=dbhost,
# port=dbport,
# user=dbuser,
# password=dbpwd,
# db=dbname,
# charset=dbcharset,
# cursorclass=cursors.DictCursor)
def query(sql):
with dbconn.cursor() as cursor:
cursor.execute(sql)
dbconn.commit()
return cursor.fetchall()
def get_files_in_directory(directory):
file_paths = []
for root, dirs, files in os.walk(directory):
for file in files:
file_paths.append(os.path.join(root, file))
return file_paths
def copy_and_save_image(input_image_path, output_image_path, filename):
try:
os.makedirs(output_image_path, exist_ok=True)
img = Image.open(input_image_path)
img.copy().save(output_image_path+filename)
except Exception as e:
# exception handling
print(e)
def cp_img(name):
i = 0
size = 100
while i <= 100:
page = i * size
i += 1
sql_str = 'select m.de_instances_id, s.de_kind_id, m.de_position_name, m.de_system_name, m.de_organ_name, m.de_disease_name, m.de_symptom_name, i.png, i.is_boost, i.boost_png' \
' from hos_database.de_instance_new_mark as m' \
' left join hos_database.de_instances as i on m.de_instances_id = i.id ' \
' left join hos_database.de_system as s on m.de_system_id = s.id ' \
' where m.de_organ_name = "'+name+'"' \
' limit ' + str(page) + ', ' + str(size)
rows = query(sql_str)
if len(rows) == 0 :
break
print(i)
de_instance_id = ''
for v3 in rows:
de_instance_id += str(v3['de_instances_id'])+','
de_instance_id = de_instance_id.strip(',')
# examine_pass_status (review status): 1 unfinished, 2 rejected, 3 approved, 4 pending review
# instance_type (base module): 1 image quality, 2 positioning, 3 region/organ, 4 symptom
# valid_status (symptom status): 1 has symptom, 2 no symptom, 3 invalid, 4 unmarked, 5 uncertain
sql_str2 = 'select de_instance_id from medical_platform.dcm_image_list_instance' \
' where de_instance_id in ('+de_instance_id+')' \
' and instance_type = 4 '
rows2 = query(sql_str2)
for v2 in rows2:
for v in rows:
if v2['de_instance_id'] == v['de_instances_id']:
png = ''
if v['is_boost'] == 1:
png = v['boost_png']
else:
png = v['png']
species = ''
if v['de_kind_id'] == 1:
species = 'cat'
else:
species = 'dog'
# print(v['de_instances_id'], v['de_organ_name'], v['de_disease_name'], v['de_symptom_name'], jpeg, png, v['_value'])
filename = png.split("/")[-1]
# download_image(png, "/disk0/data/images/disease/"+name+"/",filename)
input_path = "/disk0/data/images/positive/"+name+"/"+filename # 输入图片文件路径
output_path = "/disk0/data/organ/"+species+"/"+v['de_position_name']+"/"+v['de_system_name']+"/"+v['de_organ_name']+"/" # 输出图片文件路径
copy_and_save_image(input_path, output_path,filename)
output_path2 = "/disk0/data/symptom/"+species+"/"+v['de_position_name']+"/"+v['de_system_name']+"/"+v['de_symptom_name']+"/" # 输出图片文件路径
copy_and_save_image(input_path, output_path2,filename)
# Positive samples
def img1(name):
i = 0
size = 100
while i <= 100:
page = i * size
i += 1
sql_str = 'select m.de_instances_id, m._value, m.de_organ_name, m.de_disease_name, m.de_symptom_name, i.jpeg, i.png, i.is_boost, i.boost_jpeg, i.boost_png' \
' from hos_database.de_instance_new_mark as m' \
' left join hos_database.de_instances as i on m.de_instances_id = i.id ' \
' where m.de_disease_name = "'+name+'"' \
' limit ' + str(page) + ', ' + str(size)
rows = query(sql_str)
if len(rows) == 0 :
break
print(i)
de_instance_id = ''
for v3 in rows:
de_instance_id += str(v3['de_instances_id'])+','
de_instance_id = de_instance_id.strip(',')
# examine_pass_status (review status): 1 unfinished, 2 rejected, 3 approved, 4 pending review
# instance_type (base module): 1 image quality, 2 positioning, 3 region/organ, 4 symptom
# valid_status (symptom status): 1 has symptom, 2 no symptom, 3 invalid, 4 unmarked, 5 uncertain
sql_str2 = 'select de_instance_id from medical_platform.dcm_image_list_instance' \
' where de_instance_id in ('+de_instance_id+')' \
' and instance_type = 4 '
rows2 = query(sql_str2)
for v2 in rows2:
for v in rows:
if v2['de_instance_id'] == v['de_instances_id']:
jpeg = ''
png = ''
if v['is_boost'] == 1:
jpeg = v['boost_jpeg']
png = v['boost_png']
else:
jpeg = v['jpeg']
png = v['png']
# print(v['de_instances_id'], v['de_organ_name'], v['de_disease_name'], v['de_symptom_name'], jpeg, png, v['_value'])
filename = png.split("/")[-1]
download_image(png, "/disk0/data/images/disease/"+name+"/",filename)
# Negative samples
def img2():
i = 0
size = 100
while i <= 100:
page = i * size
i += 1
# examine_pass_status (review status): 1 unfinished, 2 rejected, 3 approved, 4 pending review
# instance_type (base module): 1 image quality, 2 positioning, 3 region/organ, 4 symptom
# valid_status (symptom status): 1 has symptom, 2 no symptom, 3 invalid, 4 unmarked, 5 uncertain
sql_str = 'select de_instance_id from medical_platform.dcm_image_list_instance' \
' where instance_type = 4 and examine_pass_status = 3 and valid_status = 2' \
' order by id desc limit ' + str(page) + ', ' + str(size)
rows = query(sql_str)
if len(rows) == 0:
break
de_instance_id = ''
for v2 in rows:
de_instance_id += str(v2['de_instance_id'])+','
de_instance_id = de_instance_id.strip(',')
sql_str3 = 'select m.de_instances_id, m._value, m.de_organ_name, m.de_disease_name, m.de_symptom_name, i.ethnic_group, i.jpeg, i.png, i.is_boost, i.boost_jpeg, i.boost_png' \
' from hos_database.de_instances as i' \
' left join hos_database.de_instance_new_mark as m on m.de_instances_id = i.id ' \
' where i.id in ('+de_instance_id+')'
rows3 = query(sql_str3)
print(i)
if len(rows3) == 0:
continue
for v3 in rows3:
if v3['de_instances_id'] is None:
png = ''
if v3['is_boost'] == 1:
png = v3['boost_png']
else:
png = v3['png']
# species = ''
# if v3['ethnic_group'] == '猫':
# species = 'cat'
# else:
# species = 'dog'
# print(v['de_instances_id'], v['de_organ_name'], v['de_disease_name'], v['de_symptom_name'], jpeg, png, v['_value'])
filename = png.split("/")[-1]
output_path = "/disk0/data/negative02/" # 输出图片文件路径
download_image(png, output_path, filename)
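# download_image(url, save_path, filename): fetch an image over HTTP and save
# it to save_path/filename, creating the directory if needed.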
def download_image(url, save_path, filename):
try:
        # send an HTTP GET request (note: no timeout is set, so this can block)
        response = requests.get(url)
        # check whether the request succeeded
        if response.status_code == 200:
            os.makedirs(save_path, exist_ok=True)
            # write the response body to the file in binary mode
            with open(save_path+filename, 'wb') as file:
                file.write(response.content)
            print(f"Image successfully downloaded: {save_path}{filename}")
else:
print(f"Failed to retrieve image. Status code: {response.status_code}")
except Exception as e:
print(f"An error occurred: {e}")
# Single ROI
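# froi_img: rasterize one FreehandRoi annotation into a binary mask. The
# polygon points come from data['handles']['points']; the outline is closed by
# repeating the first point, then filled white on a black canvas matching the
# source image size via cv2.fillPoly.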
def froi_img(image_path, output_path, filename, data):
try:
img = cv2.imread(image_path)
height, width = img.shape[:2]
image = np.zeros((height, width, 3), np.uint8)
points = data['handles']['points']
i = 0
c = len(points)
area = []
while i < c:
area.append([int(points[i]['x']),int(points[i]['y'])])
i+=1
if i == c:
area.append([int(points[0]['x']),int(points[0]['y'])])
cv2.fillPoly(image,np.array([area]), color=(255, 255, 255))
cv2.imwrite(output_path+filename, image)
except Exception as e:
        # exception handling
print(e)
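# froi(de_position_name, de_disease_name): page through FreehandRoi marks for
# a body position and disease, copy the original image into <disease>_ori/ and
# write the filled polygon mask into <disease>/, prefixing the symptom id to
# the filename.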
def froi(de_position_name, de_disease_name):
i = 17
size = 200
while i <= 36:
page = i * size
i += 1
sql_str3 = 'select m.de_instances_id, m._value, m.de_organ_id, m.de_organ_name, m.de_disease_id, m.de_disease_name, m.de_symptom_id, m.de_symptom_name, i.ethnic_group, i.jpeg, i.png, i.is_boost, i.boost_jpeg, i.boost_png' \
' from hos_database.de_instances as i' \
' left join hos_database.de_instance_new_mark as m on m.de_instances_id = i.id ' \
' where m.de_position_name="'+de_position_name + '" and m.de_disease_name = "'+de_disease_name + '" and mark_type = "FreehandRoi"' \
' limit ' + str(page) + ', ' + str(size)
rows3 = query(sql_str3)
print(i)
if len(rows3) == 0:
continue
for v3 in rows3:
png = ''
if v3['is_boost'] == 1:
png = v3['boost_png']
else:
png = v3['png']
filename = png.split("/")[-1]
input_path = "/disk0/data/images/disease/"+de_disease_name+"/"+filename # 输入图片文件路径
output_path = "/disk0/data/disease_symptom/"+de_disease_name+"/" # 输入图片文件路径
output_path_ori = "/disk0/data/disease_symptom/"+de_disease_name+"_ori/" # 输入图片文件路径
data_froi = json.loads(v3['_value'])
save_filename = str(v3['de_symptom_id'])+"_"+filename
copy_and_save_image(input_path, output_path_ori, save_filename)
froi_img(input_path, output_path, save_filename, data_froi)
# Multiple ROIs
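# froi_img2: like froi_img, but fills every ROI from a list of annotations
# into a single mask image.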
def froi_img2(image_path, output_path, filename, data):
try:
img = cv2.imread(image_path)
height, width = img.shape[:2]
image = np.zeros((height, width, 3), np.uint8)
for v in data:
points = v['handles']['points']
i = 0
c = len(points)
area = []
while i < c:
area.append([int(points[i]['x']),int(points[i]['y'])])
i+=1
if i == c:
area.append([int(points[0]['x']),int(points[0]['y'])])
cv2.fillPoly(image,np.array([area]), color=(255, 255, 255))
cv2.imwrite(output_path+filename, image)
except Exception as e:
        # exception handling
print(e)
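# froi2(): gather all lung-region FreehandRoi marks, group the parsed ROI data
# per instance id in data_frois, download each instance's PNG once, and render
# all of its lung ROIs into one combined mask under /disk0/data/organ02/lungs/.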
def froi2():
i = 44
size = 100
while i <= 100:
page = i * size
i += 1
sql_str2 = "select de_instances_id from hos_database.de_instance_new_mark as m " \
" where m.de_organ_name in ('全肺','右肺','右肺中叶','右肺前叶','右肺后叶','左肺','左肺前叶前部','左肺前叶后部','左肺后叶') and mark_type = 'FreehandRoi'" \
" limit " + str(page) + ", " + str(size)
rows2 = query(sql_str2)
de_instance_id = ''
for v2 in rows2:
de_instance_id += str(v2['de_instances_id'])+','
de_instance_id = de_instance_id.strip(',')
sql_str3 = 'select m.de_instances_id, m._value, m.de_organ_id, m.de_organ_name, m.de_disease_id, m.de_disease_name, m.de_symptom_id, m.de_symptom_name, i.ethnic_group, i.jpeg, i.png, i.is_boost, i.boost_jpeg, i.boost_png' \
' from hos_database.de_instances as i' \
' left join hos_database.de_instance_new_mark as m on m.de_instances_id = i.id ' \
' where m.de_instances_id in (' + de_instance_id + ') and mark_type = "FreehandRoi"'
rows3 = query(sql_str3)
print(i)
if len(rows3) == 0:
continue
frois = []
data_frois = {}
for vv in rows3:
if data_frois.get(vv['de_instances_id']) is None :
frois.append(vv)
data_frois[vv['de_instances_id']] = []
data_frois[vv['de_instances_id']].append(json.loads(vv['_value']))
for v3 in frois:
png = ''
if v3['is_boost'] == 1:
png = v3['boost_png']
else:
png = v3['png']
filename = png.split("/")[-1]
# input_path = "/disk0/data/images/disease/"+v3['de_disease_name']+"/"+filename # 输入图片文件路径
output_path = "/disk0/data/organ02/lungs/" # 输入图片文件路径
output_path_ori = "/disk0/data/organ02/lungs_ori/" # 输入图片文件路径
save_filename = str(v3['de_organ_id'])+"_"+filename
download_image(png, output_path_ori, save_filename)
froi_img2(output_path_ori+save_filename, output_path, save_filename, data_frois[v3['de_instances_id']])
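# froi22(name): print viewer URLs for the 20 most recent studies captured by
# the given device model (manufacturers_model_name).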
def froi22(name):
sql_str = "select study_instance_uid, manufacturers_model_name from hos_database.dcm_list as d " \
" where manufacturers_model_name ='"+name+"'" \
" order by id desc limit 20 "
rows = query(sql_str)
u1 = r'https://viewer.ai-vpet.cn/viewer/'
# u2 = r'?token=bXOBCY7M49NpTTGPHApDATRu%2FgY0PRp6ofB30I%2BfH3Zp%2BAZz4hoceaj9FDqy%2FGEA%2BTQvLmBGmMn0y0DPA67pVf6JfQR0LQm0qrBtuCPHHG%2F8x8di3C9Fu2sCwRXbyh93ftjY7%2Bi%2FDOQ6Ib4y1EHafg%3D%3D'
u2 = r'?token=L4CCTl5V4p%2FoFxXzhkoI%2BYj8LOS3oyRLXVX3KSCE1vZM7RpBTQsmNVUBKKmAzHorWTmBoAN2z7wRnoczmB6HytITFLtw9FCFTmIkIBrpyJaI1QZS3uc6peZTEpmcyBhICCZ%2B5F0tZOhe78Be5DoYEw%3D%3D'
print(name)
print('\n')
for v in rows:
print(u1+v['study_instance_uid']+u2)
print('\n')
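# froi24(): for a fixed list of study UIDs, look up the instance tag JSON and
# the hospital record, then export hospital name/phone/code, source, UID,
# manufacturer (DICOM tag 0008,0070) and a viewer URL row by row to an .xls
# workbook via xlwt.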
def froi24():
names2 = [
'1.2.410.200067.100.1.202306132151100227.16179',
'1.2.410.200067.100.1.202306111919530050.13775',
'1.2.410.200067.100.1.202306101808590285.1618',
'1.2.410.200067.100.1.202306221642290522.30621',
'1.2.410.200067.100.1.202406211032310879.15457',
'1.2.410.200067.100.1.202408021330260999.3052',
'1.2.276.0.7230010.3.3.1.20240828000000000.859487075',
'1.2.840.197608.1.15212033170821.1788',
'1.2.840.197608.1.15212033170821.16',
'172.115.1207.167.1218.1219.20240827204621.114',
'1228.184.1232.1223.158.197.20240828173020.1',
'1.2.156.112677.1000.101.20240830182741.1',
'1.2.156.112677.1000.101.20240830180240.31'
]
    # encoding: defaults to ascii; set utf-8 to support Chinese text
    # style_compression can be left at its default
    work_book = xlwt.Workbook(encoding='utf-8')
    # create a sheet object (one worksheet) with the given sheet name
    sheet_data = work_book.add_sheet('sheet1')
    # write(row, col, value) adds a cell; rows and columns are 0-based.
    # an optional style=Style.default_style argument sets font/cell format/alignment; defaults apply if omitted.
    sheet_data.write(0,0,'医院名称')  # row 1, column 1
sheet_data.write(0,1,'联系电话')
sheet_data.write(0,2,'医院码')
sheet_data.write(0,3,'来源')
sheet_data.write(0,4,'uid')
sheet_data.write(0,5,'厂家')
sheet_data.write(0,6,'url')
for k,v in enumerate(names2):
sql_str = "select it.id, study_instance_uid, manufacturers_model_name, it._value "\
" from hos_database.de_instance_tag as it " \
" left join hos_database.de_instances as i on it.de_instances_id = i.id " \
" left join hos_database.de_series as s on i.de_series_id = s.id " \
" left join hos_database.de_study as t on s.de_study_id = t.id " \
" where study_instance_uid ='"+v+"'"
rows = query(sql_str)
sql_str2 = "select h._name, h.hospital_code, h.create_phone, h.is_bk "\
" from hos_database.dcm_list as l " \
" left join hos_database.hospital_main as h on l.hospital_code = h.hospital_code " \
" where study_instance_uid ='"+v+"'"
rows2 = query(sql_str2)
u1 = r'https://viewer.ai-vpet.cn/viewer/'
# u2 = r'?token=bXOBCY7M49NpTTGPHApDATRu%2FgY0PRp6ofB30I%2BfH3Zp%2BAZz4hoceaj9FDqy%2FGEA%2BTQvLmBGmMn0y0DPA67pVf6JfQR0LQm0qrBtuCPHHG%2F8x8di3C9Fu2sCwRXbyh93ftjY7%2Bi%2FDOQ6Ib4y1EHafg%3D%3D'
u2 = r'?token=g4BFXADwt8fWcqVddlQSo4iqx8O%2BWBJwtrg11nXHj3z0tsQX0ZaGNivn5jYHoMEZN4jltj5ZU%2BbP4ztqNbwYEZhTXOf0J0szHsf7a19lWJPIutkw4I3Gg%2B474FhY73CqQn0lv%2FmcJK9v37hyf24Z6g%3D%3D'
data = json.loads(rows[0]['_value'])
url = u1+rows[0]['study_instance_uid']+u2
        # is_bk source: 0 = vpet, 1 = 谛宝医生, 2 = 必康
is_bk = ''
if rows2[0]['is_bk'] == 0 :
is_bk = 'vpet'
elif rows2[0]['is_bk'] == 1 :
is_bk = '谛宝医生'
elif rows2[0]['is_bk'] == 2 :
is_bk = '必康'
l = k+1
sheet_data.write(l,0,rows2[0]['_name'])
sheet_data.write(l,1,rows2[0]['create_phone'])
sheet_data.write(l,2,rows2[0]['hospital_code'])
sheet_data.write(l,3,is_bk)
sheet_data.write(l,4,v)
sheet_data.write(l,5,data['0008,0070']['Value'])
sheet_data.write(l,6,url)
    # save as an .xls or .xlsx workbook
    # styles do not take effect when saving as .xlsx, so use the .xls extension
work_book.save('1.xls')
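# froi31(image_path): print the mean pixel value of an image, e.g. as a rough
# brightness check.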
def froi31(image_path):
try:
img = cv2.imread(image_path)
a = np.mean(img)
print(a)
except Exception as e:
        # exception handling
print(e)
if __name__ == '__main__':
de_disease_names = [
"乳腺肿瘤",
"会阴疝",
"体表肿物",
"便秘",
"关节炎",
"关节脱位",
"其他",
"前列腺结石",
"前列腺肿大",
"占位性病变",
"右心房增大",
"后腔静脉裂孔疝",
"哮喘",
"子宫蓄脓",
"小肝征",
"尿道结石",
"尿闭",
"巨大团块",
"巨结肠",
"幼龄动物",
"幽门梗阻",
"心源性肺水肿",
"心脏增大",
"支气管炎",
"椎体脱位",
"椎体错位",
"横膈疝",
"气管塌陷",
"气胸",
"犬未到达T12后缘",
"猫未到达T13后缘",
"皮下气肿",
"皮下积气",
"肋骨肋软骨矿化",
"肝区占位性病变",
"肝脏增大",
"肝脏缩小",
"肝脏肿大",
"肠梗阻",
"肠道异物",
"肺不张",
"肺大疱",
"肺气肿",
"肺水肿",
"肺炎",
"肾结石",
"肾肿大",
"肾萎缩",
"肿瘤",
"肿瘤转移性改变",
"胃体积增大",
"胃内异物",
"胃后区体积增大",
"胃扩张扭转",
"胃扩张积气",
"胃肠道穿孔",
"胆囊壁矿化",
"胆囊结石",
"胸腔积液",
"脾脏肿大",
"腹壁疝",
"腹股沟疝",
"腹腔积气",
"腹腔积液",
"腹膜心包疝",
"膀胱破裂",
"膀胱结石",
"输尿管结石",
"锥体脱位",
"食道异物",
"食道积气",
"食道积液",
"食道积食",
"食道裂孔疝",
"骨折",
"骨折后愈合",
"骨质增生",
"骨质疏松",
"髋关节发育不良"
]
de_organ_names = [
"中腹区",
"主动脉(弓)",
"体表",
"全心",
"全肺",
"前列腺区",
"前腹区",
"双侧皮下",
"右侧皮下",
"右心室",
"右心房",
"右肺",
"右肺中叶",
"右肺前叶",
"右肺后叶",
"后腔静脉",
"后腹区",
"子宫及其附件区",
"子宫及附件区",
"小肠",
"尺骨",
"尾椎",
"尿道",
"左侧皮下",
"左心室",
"左心房",
"左心脏",
"左肺",
"左肺前叶前部",
"左肺前叶后部",
"左肺后叶",
"左腿",
"心前三角区",
"心包",
"心区",
"心胸三角区",
"掌骨",
"桡骨",
"椎隔三角区",
"皮下",
"结肠",
"肋软骨",
"肋骨",
"肝脏",
"股骨",
"肱骨",
"肺",
"肾",
"胃",
"胆囊",
"背侧皮下",
"背部皮下",
"胫骨",
"胸椎",
"胸骨",
"脾脏",
"腓骨",
"腰椎",
"腹侧皮下",
"腹膜腔",
"腹部皮下",
"膀胱",
"膝关节",
"花瓣",
"荐椎",
"趾骨",
"输尿管",
"近贲门处",
"骨盆区",
"髋关节"
]
# for v in de_organ_names:
# print(v)
# cp_img(v)
# img2()
# froi('胸部','肺炎')
# froi2()
names = [
# 'Copyright(C) SUZHOUHEYI MEDICAL',
# 'Copyright(C)E-COM',
# 'CT Q560a',
# 'DBC DR',
# 'E-COM DR-2000 VET Digital Radiography Operating Console Software',
# 'E-COM DR-2000 VET Digital Radiography Operating Console Software v6.0',
# 'E-COM DR-2000 VET Êý×ÖXÉäÏßÊÞÓÃϵͳ¿ØÖÆÈí¼þ v6.0',
# 'E-COM DR-2000 VET 数字X射线兽用系统控制软件 v6.0',
# 'ELITE 2000',
# 'Monet64',
# 'Quantum CT Q560a',
# 'Quantum CT T752',
'RayNova DR',
'Supernova C5',
'Superpet',
'VanGogh P8',
'VanGogh SC8',
'Xmaru Series'
]
# for v in names2:
#
froi24()
# image_path = '/Users/haoyanbin/Downloads/28bdb020-05eb73c5-df89b21c-df391c83-5022fd3d.png'
# image_path2 = '/Users/haoyanbin/Desktop/db0d2877-16a5787c-4c8d67c4-071097fc-deee3b7e.png'
# froi31(image_path)
# froi31(image_path2)
import docx
import os, re
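# Extract embedded images from a .docx: python-docx exposes the package
# relationships via doc.part._rels; relationships whose target_ref contains
# "image" point at picture parts, and rel.target_part.blob holds the raw
# image bytes.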
def get_pictures(word_path, result_path):
"""
图片提取
:param word_path: word路径
:return:
"""
try:
doc = docx.Document(word_path)
dict_rel = doc.part._rels
for rel in dict_rel:
rel = dict_rel[rel]
if "image" in rel.target_ref:
if not os.path.exists(result_path):
os.makedirs(result_path)
img_name = re.findall("/(.*)", rel.target_ref)[0]
word_name = os.path.splitext(word_path)[0]
                new_name = os.path.basename(word_name)
                img_name = f'{new_name}-{img_name}'
with open(f'{result_path}/{img_name}', "wb") as f:
f.write(rel.target_part.blob)
    except Exception as e:
        print(e)
if __name__ == '__main__':
    # list of Word documents under the folder; customize the paths as needed
word_path = './全国执业兽医资格考试过关必做3000题.docx'
result_path = './docpic'
# os.chdir(word_path)
# spam=os.listdir(os.getcwd())
# for i in spam:
get_pictures(word_path,result_path)
import docx
import os, re
# with open('shouyi3000.txt', 'r', encoding='utf-8') as file:
# data = file.read()
data = """
71.《中华人民共和国畜牧法》自施行()。 A.2005年7月1日 B.2006年7月1日 C.2007年7月1日 D.2008年7月1日 E.2009年7月1日 起
72.《生猪屠宰检疫规范》规定,日屠宰量在500头以上的屠宰场,检疫室面积不能低于_)。 A.10 B.15 C.20 D.30 E.35 平方米(
73.《生猪屠宰检疫规范》规定,屠宰场应距离居民区、地表水源、交通干线以及生猪交易市场_米以上()。 A.100 B.200 C.300 D.500 E.3000
74.《动物检疫管理办法》规定,动物检疫合格证明有效期最长为_天,赛马等特殊用途的动物,检疫合格证明有效期可延长至20天()。 A.5 B.7 C.15 D.20 E.30
75.《动物检疫管理办法》规定,动物、动物产品出售或调运离开产地前必须由动物检疫员实施产地检疫,动物产品、供屠宰或者育肥的动物提前检()。 A.1 B.2 C.3 D.5 E.15 天报
76.《重大动物疫情应急条例》的生效日期是()。 A.2005年11月16日 B.2005年11月18日 C.2005年12月1日 D.2006年1月1日 E.2007年1月1日
77.《重大动物疫情应急条例》的立法目的是()。 A.迅速控制、扑灭重大动物疫情 B.保障养殖业安全生产 C.保障公众身体健康与生命安全 D.维护正常社会秩序 E.以上都是
"""
# regex matching each question together with its options
pattern = re.compile(r'(\d+)\.(.*?)[((][))]。?\s*(A\..*?)(?=\d+\.|\Z)', re.S)
# regex matching each individual option
option_pattern = re.compile(r'(A|B|C|D|E)\.(.*?)(?=(A|B|C|D|E)\.|$)', re.S)
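# pattern groups: (1) question number, (2) question stem up to the "()" blank
# (the character class [((] accepts both ASCII and full-width parentheses, as
# the sample data uses full-width ones), (3) the options text; the lookahead
# (?=\d+\.|\Z) ends each match at the next numbered question or at end of
# input. option_pattern then splits the options text at each A.–E. label.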
questions = []
for match in pattern.finditer(data):
question_number = match.group(1)
question_text = match.group(2).strip()
options_text = match.group(3).strip()
options = {opt.group(1): opt.group(2).strip() for opt in option_pattern.finditer(options_text)}
question = {
"question_number": question_number,
"question_text": question_text,
"options": options
}
questions.append(question)
for question in questions:
print(question)
...@@ -3,5 +3,5 @@
from app import create_app
app = create_app()
if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=5080, debug=True)
-    # app.run(port=5080, debug=True)
+    # app.run(host='0.0.0.0', port=5080, debug=True)
+    app.run(port=5080, debug=True)
from pymysql import connect, cursors
import requests
import json
dbconn = connect(
host='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
port=3306,
user='ai_root',
password='ai_root888',
db='medical_platform',
charset='utf8',
cursorclass=cursors.DictCursor)
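# vetlas_ins(): walk the collect list from the tcloudbase mini API, then fetch
# each collection's filter list and full tupu JSON for the
# Xray_LungPattern_v8.3 dataset.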
def vetlas_ins():
url = 'http://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/collect/list'
result = requests.get(url=url)
rows = json.loads(result.text)
for row in rows:
url2 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/filter/list?collectName=Xray_LungPattern_v8.3&id='+row['id']
result2 = requests.get(url=url2)
rows2 = json.loads(result2.text)
url3 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/tupu/json_all?collectName=Xray_LungPattern_v8.3&collectId='+row['id']
result3 = requests.get(url=url3)
rows3 = json.loads(result3.text)
        # NOTE: row is a plain dict parsed from JSON, so the original
        # BeautifulSoup-style check `row.select('font')` would raise
        # AttributeError; it is left disabled here.
        # if len(row.select('font')) > 0:
        #     continue
return '1'