Refound-445 2024-12-09 15:53:18 +08:00
parent 7533dde1a1
commit 480667ecc5
7 changed files with 38 additions and 20 deletions

View File

@@ -192,8 +192,8 @@ pip install nonebot-plugin-nailongremove-base -U
| **Global settings** | | | |
| `PROXY` | No | `None` | Proxy address used when downloading model files and other assets |
| **Response settings** | | | |
| `NAILONG_BYPASS_SUPERUSER` | No | `True` | Whether to skip checking images sent by superusers |
| `NAILONG_BYPASS_ADMIN` | No | `True` | Whether to skip checking images sent by group admins |
| `NAILONG_BYPASS_SUPERUSER` | No | `False` | Whether to skip checking images sent by superusers |
| `NAILONG_BYPASS_ADMIN` | No | `False` | Whether to skip checking images sent by group admins |
| `NAILONG_NEED_ADMIN` | No | `False` | Whether to skip checking all images in a group when the bot itself is not a group admin |
| `NAILONG_LIST_SCENES` | No | `[]` | Black/whitelist of chat scene IDs<br />In single-level chats this is the chat ID (e.g. a QQ group number);<br />in multi-level chats it is the IDs of each level joined with `_` (e.g. a sub-channel under a channel, or a private chat under a channel) |
| `NAILONG_BLACKLIST` | No | `True` | Whether to use blacklist mode |
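The two bypass options above switch their default from `True` to `False` in this commit, so superuser and group-admin images are now checked unless explicitly bypassed. A rough illustration of the intended semantics only; the function and argument names below are assumptions, not the plugin's actual handler API:

```python
# Illustrative sketch of the bypass semantics described in the table above;
# names are assumptions, not the plugin's real code.
def should_check_image(is_superuser: bool, is_group_admin: bool,
                       bypass_superuser: bool = False,
                       bypass_admin: bool = False) -> bool:
    """Return True when an incoming image should go through detection."""
    if bypass_superuser and is_superuser:
        return False  # skip images from superusers when bypass is enabled
    if bypass_admin and is_group_admin:
        return False  # skip images from group admins when bypass is enabled
    return True
```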
@@ -231,6 +231,7 @@ pip install nonebot-plugin-nailongremove-base -U
- `0`: Training and inference based on the ResNet50 image classification model; thanks to [@spawner1145](https://github.com/spawner1145) for providing the model, original link: [spawner1145/NailongRecognize](https://github.com/spawner1145/NailongRecognize.git)
- `1`: Training and inference based on the YOLOX object detection model; thanks to [@NKXingXh](https://github.com/nkxingxh) for providing the model, original link: [nkxingxh/NailongDetection](https://github.com/nkxingxh/NailongDetection)
- `2`: Training and inference based on the YOLOv11 object detection model; thanks to [@Hakureirm](https://github.com/Hakureirm) for providing the model, original link: [Hakureirm/NailongKiller](https://huggingface.co/Hakureirm/NailongKiller)
- `3`: Training and inference based on the YOLOv11 object detection model; thanks to [@Threkork](https://github.com/Threkork) for providing the model, original link: [Threkork/kovi-plugin-check-alllong](https://github.com/Threkork/kovi-plugin-check-alllong). It is recommended to set `NAILONG_MODEL1_SCORE` to `{"nailong": 0.78}` and `NAILONG_MODEL1_YOLOX_SIZE` to `[640,640]` (see the sketch below)
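The `{"nailong": 0.78}` value recommended for model `3` is a per-label confidence threshold. A minimal sketch of how such a threshold is commonly applied when filtering detections, using assumed names rather than the plugin's internals:

```python
# Hypothetical filter applying a per-label confidence threshold such as
# NAILONG_MODEL1_SCORE = {"nailong": 0.78}; not the plugin's actual code.
SCORE_THRESHOLDS = {"nailong": 0.78}

def keep_detection(label: str, confidence: float) -> bool:
    """Keep a detection only if its confidence reaches the threshold for its label."""
    threshold = SCORE_THRESHOLDS.get(label)
    if threshold is None:
        return False  # labels without a configured threshold are ignored
    return confidence >= threshold
```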
### Variables available in message templates
@@ -259,6 +260,12 @@ pip install nonebot-plugin-nailongremove-base -U
## 📝 Changelog
### 2.3.4
- Added model `3` to `NAILONG_MODEL`, a model trained with YOLOv11; it is recommended to set `NAILONG_MODEL1_SCORE` to `{"nailong": 0.78}` and `NAILONG_MODEL1_YOLOX_SIZE` to `[640,640]`
- Changed config defaults: `NAILONG_BYPASS_SUPERUSER` -> `False`, `NAILONG_BYPASS_ADMIN` -> `False`
### 2.3.3
- Optimized the temporary handling scheme to reduce performance overhead and improve speed; the vector store (faiss) now also supports GPU processing, but GPU use is not recommended for non-experts because the installation process is fairly involved
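For context on the GPU note above, moving an existing faiss index onto a GPU looks roughly like the generic sketch below; it assumes a CUDA-enabled `faiss-gpu` build and is not code from this plugin:

```python
# Generic faiss example (assumes the faiss-gpu package and a CUDA device);
# illustrates the optional GPU path mentioned in the changelog entry above.
import faiss
import numpy as np

dim = 512
cpu_index = faiss.IndexFlatL2(dim)                      # plain L2 index built on the CPU
cpu_index.add(np.random.rand(100, dim).astype("float32"))

res = faiss.StandardGpuResources()                      # GPU scratch resources
gpu_index = faiss.index_cpu_to_gpu(res, 0, cpu_index)   # copy the index to GPU 0

query = np.random.rand(1, dim).astype("float32")
distances, ids = gpu_index.search(query, 5)             # 5 nearest neighbours
```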

View File

@@ -9,13 +9,13 @@ require("nonebot_plugin_uninfo")
from . import handler as handler
from .config import Config
__version__ = "2.3.3.post1"
__version__ = "2.3.4"
__plugin_meta__ = PluginMetadata(
name="自动撤回奶龙",
description="一个基于图像分类模型的简单插件~",
usage="只要群内有人发奶龙就会被撤回",
type="application",
homepage="https://github.com/Refound-445/onoebot-plugin-nailongremove",
homepage="https://github.com/Refound-445/nonebot-plugin-nailongremove",
config=Config,
supported_adapters=inherit_supported_adapters(
"nonebot_plugin_alconna",

View File

@@ -14,6 +14,8 @@ class ModelType(int, Enum):
CLASSIFICATION = 0
TARGET_DETECTION = 1
HF_DETECTION = 2
HF_YOLO = 3
class Model1Type(StrEnum):
@@ -31,8 +33,8 @@ class Model1Type(StrEnum):
class Config(BaseModel):
proxy: Optional[str] = None
nailong_bypass_superuser: bool = True
nailong_bypass_admin: bool = True
nailong_bypass_superuser: bool = False
nailong_bypass_admin: bool = False
nailong_need_admin: bool = False
nailong_list_scenes: List[str] = Field(default_factory=list)
nailong_blacklist: bool = True
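A hedged usage sketch of the new defaults follows; the import path is assumed from this diff, and `get_plugin_config` is NoneBot's standard plugin-config loader:

```python
# Hedged sketch: reading the plugin config in a NoneBot project after 2.3.4.
# The import path below is an assumption based on this diff, not verified.
from nonebot import get_plugin_config

from nonebot_plugin_nailongremove.config import Config

plugin_config = get_plugin_config(Config)
# With the new defaults, images from superusers and group admins are checked too.
print(plugin_config.nailong_bypass_superuser)  # False unless overridden in .env
print(plugin_config.nailong_bypass_admin)      # False unless overridden in .env
```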

View File

@@ -34,7 +34,7 @@ elif config.nailong_model is ModelType.TARGET_DETECTION:
"and use CPU to compute.",
) from e
elif config.nailong_model is ModelType.HF_DETECTION:
elif config.nailong_model is ModelType.HF_DETECTION or config.nailong_model is ModelType.HF_YOLO:
from .hf_detection import check as check
else:

View File

@@ -49,6 +49,7 @@ def check_single(image: np.ndarray, is_gif: bool = False) -> CheckSingleResult[N
image = image.unsqueeze(0) # type: ignore
with torch.no_grad():
output = model(image.to(device)) # type: ignore
print(output)
_, pred = torch.max(output, 1)
return CheckSingleResult(
ok=pred.item() == 1,

View File

@@ -11,11 +11,11 @@ from nonebot import logger
from nonebot.utils import run_sync
from PIL import Image
from ..config import config
from ..config import config, ModelType
from ..frame_source import FrameSource, repack_save
from .utils.common import CheckResult, CheckSingleResult, race_check, similarity_process
if config.nailong_model2_online:
if config.nailong_model2_online and config.nailong_model is ModelType.HF_DETECTION:
import base64
import io
import shutil
@@ -29,8 +29,14 @@ else:
from huggingface_hub import hf_api, hf_hub_download
from ultralytics import YOLO
REPO_ID = "Hakureirm/NailongKiller"
FILENAME = "nailong_yolo11.pt"
if config.nailong_model is ModelType.HF_DETECTION:
REPO_ID = "Hakureirm/NailongKiller"
FILENAME = "nailong_yolo11.pt"
elif config.nailong_model is ModelType.HF_YOLO:
REPO_ID = "Threkork/nailong"
FILENAME = "nailong-2024-10-16.pt"
else:
raise NotImplementedError # never reach here
model_path = os.path.join(str(config.nailong_model_dir), FILENAME)
if config.nailong_auto_update_model or not os.path.exists(model_path):
@@ -38,6 +44,7 @@ else:
file_path = os.path.join(str(config.nailong_model_dir), FILENAME)
model_info = api.model_info(REPO_ID)
def get_file_last_modified_time(file_path):
try:
timestamp = os.path.getmtime(file_path)
@@ -49,6 +56,7 @@ else:
except FileNotFoundError:
return None
local_time = get_file_last_modified_time(file_path)
if local_time is None or model_info.last_modified >= local_time:
hf_hub_download(
@@ -76,7 +84,7 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
input_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if not os.path.exists(
os.path.join(str(config.nailong_model_dir), "online_temp"),
os.path.join(str(config.nailong_model_dir), "online_temp"),
):
os.makedirs(os.path.join(str(config.nailong_model_dir), "online_temp"))
image_path = os.path.join(
@@ -100,8 +108,8 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
)
os.remove(image_path)
if (
"检测到的目标数量: " in result_info
and int(result_info.split("检测到的目标数量: ")[1].split("\n")[0]) < 1
"检测到的目标数量: " in result_info
and int(result_info.split("检测到的目标数量: ")[1].split("\n")[0]) < 1
):
return CheckSingleResult(ok=False, label=None, extra=frame)
if isinstance(result_image, str):
@@ -142,9 +150,9 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
if pad_w > 0 or pad_h > 0:
result_img = result_img[
pad_h // 2 : pad_h // 2 + original_size[1],
pad_w // 2 : pad_w // 2 + original_size[0],
]
pad_h // 2: pad_h // 2 + original_size[1],
pad_w // 2: pad_w // 2 + original_size[0],
]
return CheckSingleResult(ok=True, label="nailong", extra=result_img)

View File

@@ -21,6 +21,7 @@ T = TypeVar("T")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if config.nailong_similarity_on:
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
import json
import faiss
@@ -30,7 +31,6 @@ if config.nailong_similarity_on:
from torch import nn
from torchvision import transforms
import sklearn
transform = transforms.Compose(
[
transforms.ToTensor(),
@@ -47,7 +47,7 @@ if config.nailong_similarity_on:
):
def __init__(self):
super().__init__()
self.resnet = torchvision.models.resnet18(pretrained=False)
self.resnet = torchvision.models.resnet18(weights=None)
self.resnet.fc = nn.Linear(
self.resnet.fc.in_features,
5,
@@ -57,7 +57,7 @@ if config.nailong_similarity_on:
return self.resnet(x)
features_model = MyModel.from_pretrained(
"refoundd/NailongFeatures",
"refoundd/NailongFeatures",cache_dir=config.nailong_model_dir
).to(device)
index_path = config.nailong_model_dir / "records.index"
json_path = config.nailong_model_dir / "records.json"
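The hunk above pairs a faiss index file with a JSON record file for the similarity cache. A rough sketch of how such a pairing is typically built and queried, with an assumed 512-dimensional feature size and illustrative helper names (not the plugin's actual implementation):

```python
# Rough illustration of the records.index / records.json pairing above;
# the feature dimension and helper names are assumptions.
import json
from typing import List, Optional

import faiss
import numpy as np

DIM = 512
index = faiss.IndexFlatL2(DIM)   # one feature vector per cached image
records: List[dict] = []         # parallel metadata, persisted as records.json


def add_record(feature: np.ndarray, label: str) -> None:
    """Append a feature vector and remember its label at the same position."""
    index.add(feature.astype("float32").reshape(1, DIM))
    records.append({"label": label})


def lookup(feature: np.ndarray, threshold: float = 1.0) -> Optional[str]:
    """Return the label of the nearest cached feature if it is close enough."""
    if index.ntotal == 0:
        return None
    distances, ids = index.search(feature.astype("float32").reshape(1, DIM), 1)
    if distances[0][0] > threshold:
        return None
    return records[ids[0][0]]["label"]


# Persisting mirrors the two paths shown in the diff above.
faiss.write_index(index, "records.index")
with open("records.json", "w", encoding="utf-8") as f:
    json.dump(records, f)
```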