In theory this code can beat every slider captcha on the market (if it fails to recognize one, tune it yourself using the reference material at the end of this article).
Online demo: simple_ocr
As of 2022-07-13, it has been verified against:
1. Dingxiang (顶象)
2. NetEase Yidun (网易易盾)
The recognition code follows:
from io import BytesIO
import onnxruntime
import torch
import torchvision
import numpy as np
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import time
import cv2
import base64
from selenium.webdriver import ActionChains
import random
from PIL import Image
from lxml import etree
import requests
def padded_resize(im, new_shape=(640, 640), stride=32):
    # Letterbox resize: scale while keeping aspect ratio, then pad to new_shape
    shape = im.shape[:2]  # current shape [height, width]
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    # dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    dw /= 2  # split the padding between both sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))  # add border
    # Convert
    im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    im = np.ascontiguousarray(im)
    im = torch.from_numpy(im)
    im = im.float()
    im /= 255  # 0-255 to 0.0-1.0
    im = im[None]  # add a batch dimension
    im = im.cpu().numpy()  # torch to numpy
    return im
def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
def box_iou(box1, box2):
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results
    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 7680  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y
def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)
# NOTE: box_label is lifted from YOLOv5's Annotator class (hence the `self`
# parameter); it is not actually called anywhere in this script.
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
    # Add one xyxy box to image with label
    if self.pil or not is_ascii(label):
        self.draw.rectangle(box, width=self.lw, outline=color)  # box
        if label:
            w, h = self.font.getsize(label)  # text width, height
            outside = box[1] - h >= 0  # label fits outside box
            self.draw.rectangle((box[0],
                                 box[1] - h if outside else box[1],
                                 box[0] + w + 1,
                                 box[1] + 1 if outside else box[1] + h + 1), fill=color)
            # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
            self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
    else:  # cv2
        p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
        if label:
            tf = max(self.lw - 1, 1)  # font thickness
            w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
            outside = p1[1] - h - 3 >= 0  # label fits outside box
            p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
            cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
            cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
                        thickness=tf, lineType=cv2.LINE_AA)
def return_coordinates(xyxy, conf):
    # Convert one detection into a dict of corner coordinates plus confidence
    conf = float(conf.numpy())
    gain = 1.02
    pad = 10
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    c1, c2 = (int(xyxy[0, 0]) + 6, int(xyxy[0, 1]) + 6), (int(xyxy[0, 2]) - 6, int(xyxy[0, 3]) - 6)
    # print(f"leftTop:{c1},rightBottom:{c2},Confidence:{conf*100}%")
    result_dict = {"leftTop": c1, "rightBottom": c2, "Confidence": conf}
    return result_dict
def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
def onnx_model_main(path):
    # Load the ONNX model (CPU inference)
    session = onnxruntime.InferenceSession("last.onnx", providers=["CPUExecutionProvider"])
    start = time.time()
    image = open(path, "rb").read()
    img = np.array(Image.open(BytesIO(image)))
    # img = cv2.imread(path)
    # Image preprocessing: drop the alpha channel, then letterbox to 640x640
    img = img[:, :, :3]
    im = padded_resize(img)
    # Run inference
    pred = session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: im})[0]
    pred = torch.tensor(pred)
    pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.60, max_det=1000)  # confidence >= 0.25, IoU threshold 0.60
    coordinate_list = []
    for i, det in enumerate(pred):
        det[:, :4] = scale_coords(im.shape[2:], det[:, :4], img.shape).round()
        for *xyxy, conf, cls in reversed(det):
            # collect coordinates and confidence for each detection
            coordinates = return_coordinates(xyxy, conf)
            coordinate_list.append(coordinates)
    # sort candidate boxes by confidence, ascending
    coordinate = sorted(coordinate_list, key=lambda a: a["Confidence"])
    # elapsed time
    duration = str((time.time() - start))
    if len(coordinate) == 0:
        data = {'message': 'error', 'time': duration}
    else:
        coordinate = coordinate[-1]  # keep the highest-confidence box
        x = coordinate.get('leftTop')[0]
        y = coordinate.get('leftTop')[1]
        w = coordinate.get('rightBottom')[0] - coordinate.get('leftTop')[0]
        h = coordinate.get('rightBottom')[1] - coordinate.get('leftTop')[1]
        point = f"{x}|{y}|{w}|{h}"
        data = {'message': 'success', 'time': duration, 'point': point}
        data.update(coordinate)
    print(data)
    return data
def drow_rectangle(coordinate, path):
    img = cv2.imread(path)
    # draw the detected box
    result = cv2.rectangle(img, coordinate.get("leftTop"), coordinate.get("rightBottom"), (0, 0, 255), 2)
    cv2.imwrite("drow_rectangle.jpg", result)  # save the image with the box drawn on it
    print("rectangle drawn and saved")
Usage:
coordinate_onnx = onnx_model_main("1.png")  # path of the image to recognize
# Returned content:
# {'message': 'success', 'time': '0.5251204967498779', 'point': '260|90|59|56', 'leftTop': (260, 90), 'rightBottom': (319, 146), 'Confidence': 0.31054314970970154}
#
# This draws the returned box on the image and saves the result in the same directory:
drow_rectangle(coordinate_onnx, "1.png")  # path of the image to recognize
The result looks something like this (screenshot of the boxed gap omitted).
A Dingxiang test script based on selenium is attached below (just put the two pieces of code together; don't ask why it isn't a pure protocol implementation, the answer is that I can't write one):
class Test():
    def __init__(self):
        option = Options()
        # hide the usual automation fingerprints from the page
        option.add_experimental_option('excludeSwitches', ['enable-automation'])
        option.add_argument('--disable-blink-features=AutomationControlled')
        self.driver = webdriver.Chrome(options=option)

    def __ease_out_expo(self, sep):
        # exponential ease-out: f(t) = 1 - 2^(-10t), so the slider decelerates near the target
        if sep == 1:
            return 1
        else:
            return 1 - pow(2, -10 * sep)
    def generate_tracks(self, distance):
        """
        Generate a slide track for the given distance
        :param distance: distance to slide
        :return: slide track <type 'list'>: [[x, y, t], ...]
                 x: cumulative horizontal distance slid
                 y: cumulative vertical distance slid; 0 everywhere except the starting point
                 t: elapsed time during the slide, in milliseconds
        """
        if not isinstance(distance, int) or distance < 0:
            raise ValueError(f"distance must be an integer >= 0: distance: {distance}, type: {type(distance)}")
        # initialize the track list
        slide_track = [
            [random.randint(-50, -10), random.randint(-50, -10), 0],
            [0, 0, 0],
        ]
        # record the slider position `count` times in total
        count = 30 + int(distance / 2)
        # initialize the slide time
        t = random.randint(50, 100)
        # last recorded horizontal / vertical distance
        _x = 0
        _y = 0
        for i in range(count):
            # cumulative horizontal distance slid so far
            x = round(self.__ease_out_expo(i / count) * distance)
            # time consumed by this step
            t += random.randint(10, 20)
            if x == _x:
                continue
            slide_track.append([x, _y, t])
            _x = x
        slide_track.append(slide_track[-1])
        return slide_track
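
    # Example output (values are illustrative, since the track is randomized):
    #   generate_tracks(120)
    #   -> [[-37, -21, 0], [0, 0, 0], [9, 0, 93], [21, 0, 108], ..., [120, 0, 715], [120, 0, 715]]
    # i.e. cumulative x offsets easing out toward the target distance, with the
    # final point duplicated to mimic a brief pause before the mouse is released.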
    # Dingxiang test
    def dx_test(self):
        url = 'https://www.dingxiang-inc.com/business/captcha?utm_source=baidu1sem&utm_medium=%E4%BA%A7%E5%93%81%E8%AF%8D&utm_campaign=%E4%BA%A7%E5%93%81%E8%AF%8D-%E9%AA%8C%E8%AF%81%E7%A0%81&utm_term=%E9%AA%8C%E8%AF%81%E7%A0%81&e_matchtype=1&e_keywordid=317912325143&bd_vid=9281303599717237405'
        self.driver.get(url=url)
        self.driver.maximize_window()
        # scroll the page down via JS
        js_button = 'q=document.body.scrollTop=500'
        self.driver.execute_script(js_button)
        # open the slider demo and trigger the captcha
        self.driver.find_element(By.XPATH, '//li[@class="item-2"]/h3').click()
        time.sleep(2)
        self.driver.find_element(By.XPATH, '//div[@id="dx_captcha_oneclick_bar-inform_3"]').click()
        time.sleep(2)
        # save the captcha background, locate the gap, and visualize the detection
        self.download_yzm()
        distence = onnx_model_main('1.png')
        drow_rectangle(distence, '1.png')
        time.sleep(1)
        # slide distance = gap's left edge minus the slider's initial offset (~30 px here, site-specific)
        distence = int(distence['leftTop'][0]) - 30
        source = self.driver.find_element(By.XPATH, '//div[@id="dx_captcha_basic_slider-img-animated-wrap_4"]/span[1]')
        action = ActionChains(self.driver, duration=20)
        action.click_and_hold(source)
        a = 0
        time.sleep(1)
        # replay the generated track as relative mouse moves
        for x in self.generate_tracks(distence):
            print(x)
            action.move_by_offset(xoffset=x[0] - a, yoffset=x[1])
            a = x[0]
        time.sleep(0.5)
        action.release().perform()
        input()  # keep the browser open for inspection
    # NetEase Yidun test
    def wy_test(self):
        url = 'https://dun.163.com/trial/jigsaw'
        self.driver.get(url=url)
        self.driver.maximize_window()
        # scroll the page down via JS
        js_button = 'q=document.body.scrollTop=500'
        self.driver.execute_script(js_button)
        self.driver.find_element(By.XPATH, '//li[@class="tcapt-tabs__tab"][1]').click()
        time.sleep(2)
        self.driver.find_element(By.XPATH, '//button[@class="yidun_refresh"]').click()
        # pull the background-image URL out of the page and download it
        html = self.driver.page_source
        html = etree.HTML(html)
        url = html.xpath('//img[@class="yidun_bg-img"]/@src')[0]
        response = requests.get(url)
        print(url)
        with open('1.png', 'wb') as f:
            f.write(response.content)
        time.sleep(2)
        # NOTE: the background was already saved via requests above; download_yzm()
        # targets the Dingxiang canvas and will not work on this page
        # self.download_yzm()
        distence = onnx_model_main('1.png')
        drow_rectangle(distence, '1.png')
        time.sleep(1)
        # slide distance = gap's left edge minus the slider's initial offset (site-specific)
        distence = int(distence['leftTop'][0]) - 30
        # NOTE: this xpath is left over from the Dingxiang demo; point it at the
        # Yidun slider handle instead (e.g. //div[@class="yidun_slider"])
        source = self.driver.find_element(By.XPATH, '//div[@id="dx_captcha_basic_slider-img-animated-wrap_4"]/span[1]')
        action = ActionChains(self.driver, duration=20)
        action.click_and_hold(source)
        a = 0
        time.sleep(1)
        for x in self.generate_tracks(distence):
            print(x)
            action.move_by_offset(xoffset=x[0] - a, yoffset=x[1])
            a = x[0]
        time.sleep(0.5)
        action.release().perform()
        input()  # keep the browser open for inspection
    # image-download helper used by the test code
    def download_yzm(self):
        # render the Dingxiang background canvas to a base64 PNG and save it
        js = "return document.querySelector('.dx_captcha_basic_bg > canvas').toDataURL('image/png')"
        image_data = self.driver.execute_script(js)  # run JS to get the image data
        data = image_data.split(',')[1]
        image_data = base64.b64decode(data)
        with open('1.png', 'wb') as f:
            f.write(image_data)
if __name__ == '__main__':
    a = Test()
    a.dx_test()
    # coordinate_onnx = onnx_model_main("1.png")
    # drow_rectangle(coordinate_onnx, "1.png")
Possible errors:
1. xpath lookups fail
2. action = ActionChains(self.driver, duration=20) raises an error
3. self.driver.find_element(By.XPATH, 'xxx').click() raises an error
4. self.driver = webdriver.Chrome(options=option) raises an error
5. environment problems
Fixes:
1. Adjust the xpaths yourself; it's not hard.
2. Remove duration=20, or upgrade selenium (see the compatibility sketch after this list).
3. Change it to find_element_by_xpath('xxx'), or upgrade selenium (also covered in the sketch below).
4. Point selenium at the location of chromedriver.exe, or drop it into your Python directory.
5. If you only run the examples given here, it should be fine... probably. Failing that, add me on QQ and ask (buying me a lollipop will do as payment, mine cost only 5 mao!!!).
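For fixes 2 and 3, here is a minimal compatibility sketch (the helper names are mine, not part of the original script; it assumes the only relevant difference between your selenium versions is these two call signatures):
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By

def make_chain(driver, duration=20):
    # selenium 4.x accepts a duration kwarg; selenium 3.x does not
    try:
        return ActionChains(driver, duration=duration)
    except TypeError:
        return ActionChains(driver)

def find_by_xpath(driver, xpath):
    # find_element(By.XPATH, ...) works on both selenium 3.x and 4.x;
    # the find_element_by_xpath shortcut was removed in selenium 4.3
    return driver.find_element(By.XPATH, xpath)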
Model download:
1. CSDN download: universal slider-captcha recognition model (I set it to 0 points; if it ever starts charging, let me know, or just add me and ask for the file directly)
2. Baidu Netdisk: https://pan.baidu.com/s/1Nt2Z2Pu45RaQBs4dsS2kKg?pwd=82xg
extraction code: 82xg (painfully slow)
3. GitHub: https://github.com/Bump-mann/simple_ocr (the Baidu Netdisk link above is kept up to date there)
Online test: http://121.4.108.95:8000/index/
References:
1. Training a slider-captcha solver with deep learning (YOLOv5)
2. [YOLOv5] A thorough 6000-word, hand-holding YOLOv5 tutorial (new version, 2022.06.28)
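If the model misses your particular captcha, the references above cover retraining on your own annotations; the standard YOLOv5 loop looks roughly like this (a sketch only: the dataset file and output paths are illustrative):
# fine-tune on your own labelled slider images, then export to ONNX for this script
python train.py --img 640 --batch 16 --epochs 100 --data slider.yaml --weights yolov5s.pt
python export.py --weights runs/train/exp/weights/last.pt --include onnx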
--------------------------------------- Author's rambling ---------------------------------------
Honestly, I meant to publish the rotation-captcha solver first (the code is already written), but everyone wanted the slider one, so sliders it is...
When I started, I figured this was just image processing, simple enough. It turned out every vendor is its own type: parameters can't be hard-coded, and hand-tuned per-image algorithms behave inconsistently. Pitfalls everywhere, so I had no choice but to switch to training a model.
What? You ask whether this code can really beat everything? It can, of course it can, provided you have enough training data.
Reader: Isn't this bullying? Clickbait!
Me: Uh... well, if enough people nag me, I will keep updating it until it truly beats them all.
Every annotation in this model's dataset was drawn box by box by me personally, sob. I would love to plug into a captcha-labelling platform too, but I'm broke.