Commit b1c6043f authored by haoyanbin's avatar haoyanbin

1

parent ea514cf8
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
import re import re
from flask import Blueprint, request, json from flask import Blueprint, request, json
from app.model.base import dbconn, graph_driver from app.model.base import dbconn, graph_driver
from app.utils.alioss import AliyunOss
import time
import random
bp = Blueprint('search', __name__, url_prefix='/search') bp = Blueprint('search', __name__, url_prefix='/search')
...@@ -18,6 +21,16 @@ for val_keyword in rows: ...@@ -18,6 +21,16 @@ for val_keyword in rows:
keyword_clinical[val_keyword['keyword']] = val_keyword['clinical'] keyword_clinical[val_keyword['keyword']] = val_keyword['clinical']
@bp.route('/upload', methods=['POST', 'GET'])
def upload():
    """Upload a local image to Aliyun OSS and return its public URL as JSON.

    NOTE(review): the source file path is hard-coded — this looks like a
    developer smoke-test endpoint rather than a real upload handler; the
    request payload is ignored entirely.
    """
    import os

    file = "/Users/haoyanbin/Desktop/WechatIMG24.jpeg"
    # Bug fix: keep the real extension of the uploaded file instead of
    # always labelling the stored object ".png".
    ext = os.path.splitext(file)[1] or ".png"
    # Unique object name: "seg" + timestamp + four random digits + ext.
    filename = ("seg"
                + time.strftime("%Y%m%d%H%M%S", time.localtime())
                + str(random.randint(1000, 9999))
                + ext)
    img_url = AliyunOss().put_object_from_file(filename, file)
    print(img_url)
    return json.dumps(img_url)
@bp.route('/illness_search', methods=['POST', 'GET']) @bp.route('/illness_search', methods=['POST', 'GET'])
def illness_search(): def illness_search():
print(111) print(111)
......
...@@ -19,6 +19,14 @@ dbconn2 = MYSQL( ...@@ -19,6 +19,14 @@ dbconn2 = MYSQL(
dbcharset='utf8' dbcharset='utf8'
) )
# Third MySQL connection handle, pointing at the "medical_platform" schema.
# NOTE(review): credentials are hard-coded in source control — move them to
# environment variables or a secrets store.
dbconn3 = MYSQL(
    dbhost='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
    dbport=3306,
    dbuser='dbc_saas',
    dbpwd='dbc_saas888888',
    dbname='medical_platform',
    dbcharset='utf8'
)
# graph = Graph("", auth=("neo4j", "11111111")) # graph = Graph("", auth=("neo4j", "11111111"))
# graph = Graph("bolt://gds-2zeyv40mi12s6mjc149870pub.graphdb.rds.aliyuncs.com:3734", auth=("dbc_tair", "dbc_tair888888")) # graph = Graph("bolt://gds-2zeyv40mi12s6mjc149870pub.graphdb.rds.aliyuncs.com:3734", auth=("dbc_tair", "dbc_tair888888"))
......
import oss2
class AliyunOss(object):
    """Thin wrapper around an Aliyun OSS bucket for uploading files."""

    def __init__(self):
        # NOTE(review): AccessKey credentials are hard-coded in source
        # control — move them to environment variables / a secrets store.
        self.access_key_id = "LTAI5t91PkMfeZSckddWNxiT"            # AccessKey ID from the Aliyun console
        self.access_key_secret = "1MblQ0r6w9LC3tlR5O1zNxnDQKKbjH"  # AccessKey secret
        self.auth = oss2.Auth(self.access_key_id, self.access_key_secret)
        self.bucket_name = "dbc-static"                # bucket created in the Aliyun console
        self.endpoint = "oss-cn-beijing.aliyuncs.com"  # bucket endpoint
        self.dir = "ai-chest/"                         # key prefix ("directory") inside the bucket
        self.bucket = oss2.Bucket(self.auth, self.endpoint, self.bucket_name)

    def put_object_from_file(self, name, file):
        """Upload a local file and return the stored object's public URL.

        :param name: object file name to store under ``self.dir`` in the bucket
        :param file: path of the local file to upload
        :return: public URL of the uploaded object
        """
        self.bucket.put_object_from_file(self.dir + name, file)
        # Bug fix: the public URL host is "<bucket>.<endpoint>" and the path
        # is the full object key (prefix + name).  The original prepended the
        # prefix to the bucket name, producing an invalid hostname, and
        # omitted the prefix from the path.
        return "https://{}.{}/{}".format(self.bucket_name, self.endpoint, self.dir + name)
import time
import requests
import re
class GoogleTranslate(object):
    """A python wrapped free and unlimited API for Google Translate.

    Port of the browser-side "tk" token algorithm plus a thin HTTP client
    around ``translate_a/single``.
    """

    def __init__(self, sl='auto', tl='', domainnames=""):
        """
        :param sl: source language ("auto" to detect)
        :param tl: target language
        :param domainnames: google domain suffix, e.g. "com" gives
            "translate.google.com".  In China the "com" domain is blocked
            by the GFW; use "cn" instead.
        """
        self.sl = sl
        self.tl = tl
        self.hl = tl
        if domainnames == "":
            self.domainnames = "com"
        else:
            self.domainnames = domainnames
        # NOTE(review): getTKK may return None when the scrape fails;
        # trans()/_gettk() would then raise — confirm desired handling.
        self.TKK = getTKK(domainnames=self.domainnames)

    def _returnintorzero(self, d):
        """Return int(d), or 0 when d cannot be parsed as an integer."""
        try:
            temp = int(d)
        except Exception:
            temp = 0
        return temp

    def _xr(self, a, b):
        """Apply Google's obfuscation mini-language *b* (triples of
        add/xor marker, shift direction, shift amount) to integer *a*."""
        size_b = len(b)
        c = 0
        while c < size_b - 2:
            d = b[c + 2]
            d = ord(d[0]) - 87 if 'a' <= d else int(d)
            # JS ">>>" is an unsigned shift — emulate via a 2**32 mask.
            d = (a % 0x100000000) >> d if '+' == b[c + 1] else a << d
            a = a + d & 4294967295 if '+' == b[c] else a ^ d
            c += 3
        return a

    def trans(self, text):
        """
        Translate *text* and return the concatenated translated sentences.

        :param text: the text to be translated
        :raises Exception: re-raised with the failing text when the
            response cannot be parsed
        """
        # Bug fixes vs. the original:
        #  * the TKK hour field is a string, so comparing it to an int was
        #    always unequal and refetched the TKK on every request;
        #  * the token was computed from the stale TKK *before* the refresh.
        timeh = int(time.time() / 3600)
        if self._returnintorzero(self.TKK.split(".")[0]) != timeh:
            self.TKK = getTKK(domainnames=self.domainnames)
        tk = self._gettk(text)
        data = {
            "client": 'webapp',
            "sl": self.sl,
            "tl": self.tl,
            "hl": self.hl,
            "dt": ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'],
            "ie": 'UTF-8',
            "oe": 'UTF-8',
            "otf": 1,
            "ssel": 0,
            "tsel": 0,
            "kc": 7,
            "q": text,
            "tk": tk
        }
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "accept-encoding": "gzip, deflate, br"}
        url = 'https://translate.google.' + self.domainnames + '/translate_a/single'
        jsonres = requests.get(url=url, headers=headers, params=data)
        lines = ''
        try:
            # The first element of the response holds the per-sentence
            # [translated, original, ...] tuples; concatenate translations.
            for i in jsonres.json()[0]:
                if i:
                    if i[0]:
                        lines = lines + i[0]
        except Exception as e:
            print("失败语句:")
            print(text)
            print("tk:")
            print(e)
            print('实际返回信息:')
            print(jsonres.text)
            raise Exception(text)
        return lines

    def _gettk(self, a):
        """Compute the "tk" request token for text *a* from the current TKK."""
        d = self.TKK.split(".")
        b = int(d[0])
        # Bug fix: the original hand-rolled UTF-8 encoder still contained a
        # JavaScript expression (a.charCodeAt(++g)) that raised
        # AttributeError on surrogate input, and it mis-encoded code points
        # above U+FFFF with the 3-byte branch.  str.encode("utf-8") yields
        # exactly the byte sequence the JS algorithm folds over.
        e = a.encode("utf-8")
        a = b
        for byte in e:
            a = a + byte
            a = self._xr(a, "+-a^+6")
        a = self._xr(a, "+-3^+b+-f")
        a ^= self._returnintorzero(d[1])
        if 0 > a:
            a = (a & 2147483647) + 2147483648
        a %= 1E6
        return str(int(a)) + "." + str(int(a) ^ b)
def getTKK(domainnames=""):
    """Scrape the current TKK token seed from the Google Translate page.

    :param domainnames: google domain suffix ("" means "com")
    :return: the TKK string (e.g. "406398.2087938574"), or None when it
        cannot be found in the page
    """
    if domainnames == "":
        url = "https://translate.google.com/"
    else:
        url = "https://translate.google." + domainnames + "/"
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, br"}
    googleindexpage = requests.get(url, headers=headers).text
    # Raw string: "\d" in a plain literal is an invalid escape
    # (DeprecationWarning on modern Python).  Debug print removed.
    tkk = re.findall(r"tkk:'(\d*\.\d*)'", googleindexpage)
    if len(tkk) != 0:
        return tkk[0]
    else:
        return None
if __name__ == '__main__':
    # pass
    # # This is an example.
    # Demo: translate a Spanish sentence to Simplified Chinese through the
    # ".cn" mirror (reachable from mainland China).  Requires network access.
    translator = GoogleTranslate(domainnames="cn", tl="zh-CN")
    text_origin = "Guía de servicios notariales y consulares en Bolivia. £100"
    print(translator.trans(text_origin))
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
...@@ -67,6 +67,26 @@ class MYSQL: ...@@ -67,6 +67,26 @@ class MYSQL:
self.connection.commit() self.connection.commit()
return last_id return last_id
def insert_all(self, table, data):
    """Bulk INSERT IGNORE of a list of row dicts into *table*.

    :param table: table name (interpolated into the SQL — must be trusted input)
    :param data: list of dicts; every dict is assumed to share the key set
        (and ordering) of data[0] — TODO confirm with callers
    :return: None (rows are inserted and committed as a side effect)
    """
    # Bug fix: an empty list previously raised IndexError on data[0];
    # treat it as a no-op instead.
    if not data:
        return
    with self.connection.cursor() as cursor:
        # The "field=%s, ..." placeholder clause is built from the first
        # row only; executemany supplies each row's values in order.
        params = self.join_field_value(data[0])
        sql = "INSERT IGNORE INTO {table} SET {params}".format(
            table=table, params=params)
        ins_data = [tuple(row.values()) for row in data]
        cursor.executemany(sql, ins_data)
        # last_id = self.connection.insert_id()
        self.connection.commit()
    return
def delete(self, table, condition=None, limit=None): def delete(self, table, condition=None, limit=None):
""" """
mysql delete() function mysql delete() function
...@@ -201,7 +221,7 @@ class MYSQL: ...@@ -201,7 +221,7 @@ class MYSQL:
return cursor.fetchone() if fetchone else cursor.fetchall() return cursor.fetchone() if fetchone else cursor.fetchall()
def query(self, sql, fetchone=False, execute=False): def query(self, sql, fetchone=False, execute=False):
print(sql) # print(sql)
"""execute custom sql query""" """execute custom sql query"""
with self.connection.cursor() as cursor: with self.connection.cursor() as cursor:
cursor.execute(sql) cursor.execute(sql)
......
This diff is collapsed.
import docx
import os, re
def get_pictures(word_path, result_path):
    """Extract every image embedded in a .docx file into *result_path*.

    :param word_path: path of the Word (.docx) document
    :param result_path: directory the images are written to (created on demand)
    :return: None; best-effort — extraction errors are reported, not raised
    """
    try:
        doc = docx.Document(word_path)
        # Relationship map of the document part; image relationships carry
        # "image" in their target_ref (e.g. "media/image1.png").
        dict_rel = doc.part._rels
        for rel_id in dict_rel:
            rel = dict_rel[rel_id]
            if "image" not in rel.target_ref:
                continue
            os.makedirs(result_path, exist_ok=True)
            img_name = re.findall("/(.*)", rel.target_ref)[0]
            word_name = os.path.splitext(word_path)[0]
            # Bug fix: the original tested "os.sep in word_name" and then
            # split on a literal backslash, so on POSIX systems the full
            # directory prefix leaked into the output file name.
            # os.path.basename handles the platform separator correctly.
            new_name = os.path.basename(word_name)
            img_name = f'{new_name}-' + '.' + f'{img_name}'
            with open(f'{result_path}/{img_name}', "wb") as f:
                f.write(rel.target_part.blob)
    except Exception as e:
        # The original bare "except: pass" hid every failure; keep the
        # best-effort contract but report what went wrong.
        print(f"get_pictures failed for {word_path}: {e}")
if __name__ == '__main__':
    # Extract the images of one hard-coded Word document; adjust the paths
    # as needed (the commented lines below show how to batch a directory).
    word_path = './全国执业兽医资格考试过关必做3000题.docx'
    result_path = './docpic'
    # os.chdir(word_path)
    # spam=os.listdir(os.getcwd())
    # for i in spam:
    get_pictures(word_path,result_path)
import docx
import os, re
# with open('shouyi3000.txt', 'r', encoding='utf-8') as file:
#     data = file.read()
data = """
71.《中华人民共和国畜牧法》自施行()。 A.2005年7月1日 B.2006年7月1日 C.2007年7月1日 D.2008年7月1日 E.2009年7月1日 起
72.《生猪屠宰检疫规范》规定,日屠宰量在500头以上的屠宰场,检疫室面积不能低于_)。 A.10 B.15 C.20 D.30 E.35 平方米(
73.《生猪屠宰检疫规范》规定,屠宰场应距离居民区、地表水源、交通干线以及生猪交易市场_米以上()。 A.100 B.200 C.300 D.500 E.3000
74.《动物检疫管理办法》规定,动物检疫合格证明有效期最长为_天,赛马等特殊用途的动物,检疫合格证明有效期可延长至20天()。 A.5 B.7 C.15 D.20 E.30
75.《动物检疫管理办法》规定,动物、动物产品出售或调运离开产地前必须由动物检疫员实施产地检疫,动物产品、供屠宰或者育肥的动物提前检()。 A.1 B.2 C.3 D.5 E.15 天报
76.《重大动物疫情应急条例》的生效日期是()。 A.2005年11月16日 B.2005年11月18日 C.2005年12月1日 D.2006年1月1日 E.2007年1月1日
77.《重大动物疫情应急条例》的立法目的是()。 A.迅速控制、扑灭重大动物疫情 B.保障养殖业安全生产 C.保障公众身体健康与生命安全 D.维护正常社会秩序 E.以上都是
"""

# One question = number + "." + stem ending in "()" + the option run.
# Bug fix: the original pattern demanded whitespace before "()" and the
# options to start immediately after it, but in this data "()" touches the
# stem and is followed by "。" — so the pattern never matched and the
# questions list was always empty.  Allow the optional "。" separator.
pattern = re.compile(r'(\d+)\.(.*?)\(\)\s*。?\s*(A\..*?)(?=\d+\.|\Z)', re.S)
# One option: a letter A-E up to the next option letter or end of the run.
option_pattern = re.compile(r'(A|B|C|D|E)\.(.*?)(?=(A|B|C|D|E)\.|$)', re.S)

questions = []
for match in pattern.finditer(data):
    question_number = match.group(1)
    question_text = match.group(2).strip()
    options_text = match.group(3).strip()
    options = {opt.group(1): opt.group(2).strip() for opt in option_pattern.finditer(options_text)}
    question = {
        "question_number": question_number,
        "question_text": question_text,
        "options": options
    }
    questions.append(question)

for question in questions:
    print(question)
This diff is collapsed.
...@@ -3,5 +3,5 @@ from app import create_app ...@@ -3,5 +3,5 @@ from app import create_app
app = create_app() app = create_app()
if __name__ == '__main__': if __name__ == '__main__':
app.run(host='0.0.0.0', port=5080, debug=True) # app.run(host='0.0.0.0', port=5080, debug=True)
# app.run(port=5080, debug=True) app.run(port=5080, debug=True)
from pymysql import connect, cursors
import requests
import json
# Shared PyMySQL connection to the "medical_platform" database; DictCursor
# makes every fetched row a dict keyed by column name.
# NOTE(review): credentials are hard-coded in source control — move them to
# environment variables or a secrets store.
dbconn = connect(
    host='rm-2zepcf8kag0aol0q48o.mysql.rds.aliyuncs.com',
    port=3306,
    user='ai_root',
    password='ai_root888',
    db='medical_platform',
    charset='utf8',
    cursorclass=cursors.DictCursor)
def vetlas_ins():
    """Fetch the collect list from the vetlas service and, for each row,
    pull its filter list and tupu JSON.

    NOTE(review): rows2/rows3 are fetched and parsed but never persisted —
    confirm whether the results should be stored somewhere.

    :return: the literal string '1' (legacy success flag)
    """
    url = 'http://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/collect/list'
    result = requests.get(url=url)
    rows = json.loads(result.text)
    for row in rows:
        url2 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/filter/list?collectName=Xray_LungPattern_v8.3&id='+row['id']
        result2 = requests.get(url=url2)
        rows2 = json.loads(result2.text)  # parsed as a sanity check on the response
        url3 = 'https://express-s72m-101984-5-1325696513.sh.run.tcloudbase.com/mini/tupu/json_all?collectName=Xray_LungPattern_v8.3&collectId='+row['id']
        result3 = requests.get(url=url3)
        rows3 = json.loads(result3.text)  # parsed as a sanity check on the response
        # Bug fix: the original called row.select('font') here — a
        # BeautifulSoup method left over from a scraper.  row is a plain
        # dict from json.loads, so the first iteration raised
        # AttributeError; the guard was dead code and has been removed.
    return '1'
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment