YOLOv5 is an open-source visual-recognition project. I built this to study applications of YOLO, not to make a game cheat.
I haven't had time to record in-game test footage myself, so for now here is someone else's result:
[APEX target detection: results after training on 2000 images - Bilibili]
import argparse
import os
import time
import cv2
import numpy as np
import pynput
import torch
import win32con
import win32gui
from simple_pid import PID
from aim_csgo.apex_aim import lock
from aim_csgo.cs_model import load_model
from aim_csgo.screen_inf import (get_parameters, grab_screen_mss,
grab_screen_win32)
from aim_csgo.verify_args import verify_args
from utils.augmentations import letterbox
from utils.general import non_max_suppression, scale_coords, xyxy2xywh
parser = argparse.ArgumentParser()
parser.add_argument('--model-path', type=str, default='weights/best.pt', help='path to the model weights')
parser.add_argument('--imgsz', type=int, default=640, help='inference size; must match the imgsz used when training the model')
parser.add_argument('--conf-thres', type=float, default=0.1, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IoU threshold for NMS')
parser.add_argument('--use-cuda', type=bool, default=False, help='whether to run inference on CUDA')
parser.add_argument('--show-window', type=bool, default=True, help='show the detection window (for debugging; if True, do not click the X in the corner)')
parser.add_argument('--top-most', type=bool, default=True, help='keep the detection window on top of other windows')
parser.add_argument('--resize-window', type=float, default=1/2, help='scale factor for the detection window')
parser.add_argument('--thickness', type=int, default=2, help='bounding-box line thickness; must be greater than 1/resize-window')
parser.add_argument('--show-fps', type=bool, default=False, help='show FPS')
parser.add_argument('--show-label', type=bool, default=True, help='show class labels')
parser.add_argument('--use_mss', type=bool, default=True, help='use mss for screen capture; if False, use win32 capture (use True on laptops)')
parser.add_argument('--region', type=tuple, default=(0.4, 0.4), help='detection region as fractions of the x and y axes; (1.0, 1.0) means full screen, smaller values shrink the region around the screen centre')
parser.add_argument('--hold-lock', type=bool, default=False, help='lock mode; True = hold to lock, False = toggle')
parser.add_argument('--lock-sen', type=float, default=3.0, help='lock sensitivity factor, matching the in-game sensitivity (changing it is not recommended)')
parser.add_argument('--lock-smooth', type=float, default=1.9, help='lock smoothing factor; larger is smoother')
parser.add_argument('--lock-button', type=str, default='right', help='lock key; mouse buttons only [left, middle, right, x1, x2]')
parser.add_argument('--head-first', type=bool, default=True, help='prefer head detections')
parser.add_argument('--lock-tag', type=list, default=[0], help='tag corresponding to person; adjust if your model uses different class indices')
parser.add_argument('--lock-choice', type=list, default=[0], help='target classes to lock onto, chosen from your own tags')
args = parser.parse_args()
'------------------------------------------------------------------------------------'
verify_args(args)
cur_dir = os.path.dirname(os.path.abspath(__file__)) + '\\'
args.model_path = cur_dir + args.model_path
args.lock_tag = [str(i) for i in args.lock_tag]
args.lock_choice = [str(i) for i in args.lock_choice]
device = 'cuda' if args.use_cuda else 'cpu'
half = device != 'cpu'
imgsz = args.imgsz
conf_thres = args.conf_thres
iou_thres = args.iou_thres
top_x, top_y, x, y = get_parameters()
len_x, len_y = int(x * args.region[0]), int(y * args.region[1])
top_x, top_y = int(top_x + x // 2 * (1. - args.region[0])), int(top_y + y // 2 * (1. - args.region[1]))
monitor = {'left': top_x, 'top': top_y, 'width': len_x, 'height': len_y}
model = load_model(args)
stride = int(model.stride.max())
names = model.module.names if hasattr(model, 'module') else model.names
lock_mode = False
team_mode = True
lock_button = eval('pynput.mouse.Button.' + args.lock_button)
mouse = pynput.mouse.Controller()
# PID coefficients can be tuned (typical values on a GTX 960: kp = 0.5, ki = 0.17, kd = 0.03)
pidx = PID(0.5, 0.17, 0.03, setpoint=0, sample_time=0.001)
pidy = PID(1.22, 0.12, 0.0, setpoint=0, sample_time=0.001)
pidx.output_limits = (-4000, 4000)
pidy.output_limits = (-3000, 3000)
if args.show_window:
    cv2.namedWindow('aim', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('aim', int(len_x * args.resize_window), int(len_y * args.resize_window))
def on_click(x, y, button, pressed):
    global lock_mode
    if button == lock_button:
        if args.hold_lock:
            if pressed:
                lock_mode = True
                print('locking...')
            else:
                lock_mode = False
                print('lock mode off')
        else:
            if pressed:
                lock_mode = not lock_mode
                print('lock mode', 'on' if lock_mode else 'off')
listener = pynput.mouse.Listener(on_click=on_click)
listener.start()
print('enjoy yourself!')
t0 = time.time()
cnt = 0
while True:
    if cnt % 20 == 0:
        top_x, top_y, x, y = get_parameters()
        len_x, len_y = int(x * args.region[0]), int(y * args.region[1])
        top_x, top_y = int(top_x + x // 2 * (1. - args.region[0])), int(top_y + y // 2 * (1. - args.region[1]))
        monitor = {'left': top_x, 'top': top_y, 'width': len_x, 'height': len_y}
        cnt = 0

    if args.use_mss:
        img0 = grab_screen_mss(monitor)
        img0 = cv2.resize(img0, (len_x, len_y))
    else:
        img0 = grab_screen_win32(region=(top_x, top_y, top_x + len_x, top_y + len_y))
        img0 = cv2.resize(img0, (len_x, len_y))

    img = letterbox(img0, imgsz, stride=stride)[0]
    img = img.transpose((2, 0, 1))[::-1]
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()
    img /= 255.
    if len(img.shape) == 3:
        img = img[None]

    pred = model(img, augment=False, visualize=False)[0]
    pred = non_max_suppression(pred, conf_thres, iou_thres, agnostic=False)

    aims = []
    for i, det in enumerate(pred):
        gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]
        if len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
            for *xyxy, conf, cls in reversed(det):
                # bbox: (tag, x_center, y_center, x_width, y_width)
                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                line = (cls, *xywh)  # label format
                aim = ('%g ' * len(line)).rstrip() % line
                aim = aim.split(' ')
                aims.append(aim)

    if len(aims):
        if lock_mode:
            lock(aims, mouse, top_x, top_y, len_x, len_y, args, pidx, pidy)
        if args.show_window:
            for i, det in enumerate(aims):
                tag, x_center, y_center, width, height = det
                x_center, width = len_x * float(x_center), len_x * float(width)
                y_center, height = len_y * float(y_center), len_y * float(height)
                top_left = (int(x_center - width / 2.), int(y_center - height / 2.))
                bottom_right = (int(x_center + width / 2.), int(y_center + height / 2.))
                cv2.rectangle(img0, top_left, bottom_right, (0, 255, 0), thickness=args.thickness)
                if args.show_label:
                    cv2.putText(img0, tag, top_left, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (235, 0, 0), 4)

    if args.show_window:
        if args.show_fps:
            cv2.putText(img0, "FPS:{:.1f}".format(1. / (time.time() - t0)), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 235), 4)
            print(1. / (time.time() - t0))
            t0 = time.time()
        cv2.imshow('aim', img0)
        if args.top_most:
            hwnd = win32gui.FindWindow(None, 'aim')
            CVRECT = cv2.getWindowImageRect('aim')
            win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
        cv2.waitKey(1)

    pidx(0)
    pidy(0)
    cnt += 1
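The lock() helper from aim_csgo.apex_aim is not shown here, but the two PID controllers created above are what smooth the aiming: presumably each frame the pixel offset between the crosshair and the chosen target is fed into pidx/pidy, and their output is used as the relative mouse move. A minimal standalone sketch of that idea, not the project's actual implementation (function and variable names are hypothetical):

from simple_pid import PID

# Hypothetical sketch of PID-smoothed aiming: feed the pixel offset between the crosshair
# and the target into the controller and use its output as this frame's mouse delta.
pid_x = PID(0.5, 0.17, 0.03, setpoint=0, sample_time=0.001)
pid_x.output_limits = (-4000, 4000)

def smoothed_dx(target_x: float, crosshair_x: float) -> float:
    # simple_pid computes its error as (setpoint - input); with setpoint=0, feeding
    # (crosshair_x - target_x) yields a delta that moves the crosshair toward the target.
    return pid_x(crosshair_x - target_x)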
1. Download and install the Logitech driver; you do not need to actually buy a Logitech mouse.
2. Install the required Python libraries (a quick import check is sketched below).
3. Run the script.
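Since the exact dependency list is not given here, the package names below are assumed from the script's imports. A minimal check you could run before starting; anything reported as missing can be installed with pip:

# Assumed from the import list above: opencv-python (cv2), numpy, pynput, torch,
# pywin32 (win32gui), simple-pid, and mss for the mss capture path.
import importlib

for pkg in ('cv2', 'numpy', 'pynput', 'torch', 'win32gui', 'simple_pid', 'mss'):
    try:
        importlib.import_module(pkg)
        print(pkg, 'OK')
    except ImportError:
        print(pkg, 'missing')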
Editing this line lets you choose which mouse button switches the lock feature on and off:
parser.add_argument('--lock-button', type=str, default='right', help='lock按键;只支持鼠标按键,调整自瞄键[left, middle, right, x1, x2]')
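For instance, a hypothetical variation that uses the x2 side button instead of the right mouse button; only the names listed in the help string are valid, since the script resolves the string with eval('pynput.mouse.Button.' + args.lock_button):

# Hypothetical variation: toggle the lock with the x2 side button instead of the right button
parser.add_argument('--lock-button', type=str, default='x2', help='lock key; mouse buttons only [left, middle, right, x1, x2]')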