Refound-445 2024-11-27 22:50:55 +08:00
parent 81c647722a
commit d98f6bd411
8 changed files with 171 additions and 138 deletions

View File

@@ -57,7 +57,7 @@ NaiLongRemove is a Nailong-recognition plugin built on a simple AI model; it can
### Technology
The plugin currently supports two models, which can be switched via the configuration file; see the Configuration section further down.
Users can pick whichever model they prefer. Both models have been performance-optimized, but varying degrees of error are still possible, and further feedback is always welcome~
## 💿 Installation
@@ -188,7 +188,7 @@ pip install nonebot-plugin-nailongremove-base -U
Add the required configuration items from the table below to the `.env` file of your NoneBot2 project.
| Option | Required | Default | Description |
|:---:|:---:|:---:|:---:|
| **Global** | | | |
| `PROXY` | No | `None` | Proxy address used when downloading model files and similar assets |
| **Response** | | | |
@@ -202,14 +202,14 @@ pip install nonebot-plugin-nailongremove-base -U
| **Behavior** | | | |
| `NAILONG_RECALL` | No | `True` | Whether to recall (withdraw) the offending message |
| `NAILONG_MUTE_SECONDS` | No | `0` | Mute duration; the default `0` means no mute<br/>Unit: seconds |
- | `NAILONG_TIP` | No | `{"nailong": "本群禁止发奶龙!"}` | Tip to send, using [Alconna's message template](https://nonebot.dev/docs/best-practice/alconna/uniseg#%E4%BD%BF%E7%94%A8%E6%B6%88%E6%81%AF%E6%A8%A1%E6%9D%BF); available variables are listed below. Values can be customized per label; a label without an entry falls back to `nailong` |
+ | `NAILONG_TIP` | No | `{"nailong": ["本群禁止发奶龙!"]}` | Tip to send, using [Alconna's message template](https://nonebot.dev/docs/best-practice/alconna/uniseg#%E4%BD%BF%E7%94%A8%E6%B6%88%E6%81%AF%E6%A8%A1%E6%9D%BF); available variables are listed below. Values can be customized per label, and one message from the list is sent at random.<br/>A label without an entry falls back to `nailong`.<br/>If the value is an empty list `[]`, detection still runs but no message is sent |
- | `NAILONG_FAILED_TIP` | No | `{"nailong": "{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈"}` | Tip sent when the recall fails or recall is disabled; same as above |
+ | `NAILONG_FAILED_TIP` | No | `{"nailong": ["{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈"]}` | Tip sent when the recall fails or recall is disabled; same as above |
| `NAILONG_CHECK_ALL_FRAMES` | No | `False` | Whether to check every frame of the image when using model 1; requires `NAILONG_CHECK_MODE` to be `0`. When enabled, the `$checked_result` variable in the message template becomes animated if the original image is animated |
| `NAILONG_CHECK_MODE` | No | `0` | How animated GIFs are checked<br/>0. Check all frames<br/>1. Check only the first frame<br/>2. Check randomly sampled frames |
| **Similarity detection** | | | |
| `NAILONG_SIMILARITY_ON` | No | `False` | Whether to run similarity detection against local storage before processing an image (this feature is still being developed; it may currently be resource-intensive and slow) |
| `NAILONG_SIMILARITY_MAX_STORAGE` | No | `10` | Maximum number of flagged images kept in local storage; when the limit is reached, earlier records are compressed and deleted |
- | `NAILONG_SIMILARITY_MAX_BATCH_SIZE` | No | `10` | Maximum batch size processed during local-storage similarity detection |
+ | `NAILONG_HF_TOKEN` | No | `None` | Hugging Face Access Token; data is automatically uploaded to Hugging Face, making you a dataset contributor |
| **Common model options** | | | |
| `NAILONG_MODEL_DIR` | No | `./data/nailongremove` | Where model files are downloaded |
| `NAILONG_MODEL` | No | `1` | Which model to load; available models are listed below |
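For illustration, a minimal `.env` sketch using the new list-valued tip format; both tip strings are the documented defaults, and the token value is a placeholder:

```env
NAILONG_TIP={"nailong": ["本群禁止发奶龙!", "{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈"]}
NAILONG_FAILED_TIP={"nailong": []}
NAILONG_HF_TOKEN=hf_xxxxxxxx
```

With `NAILONG_FAILED_TIP` set to an empty list as above, failed recalls would still be detected and punished but would produce no reply.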
@@ -259,6 +259,12 @@ pip install nonebot-plugin-nailongremove-base -U
## 📝 Changelog
+### 2.3.3
+
+- Optimized the interim processing scheme, reducing the performance cost while improving speed. The faiss vector store now also supports GPU processing, though GPU use is not recommended for non-experts because the installation process is fairly involved
+- Added `NAILONG_HF_TOKEN` to automatically upload flagged images to a Hugging Face dataset
+- Changed the format of `NAILONG_TIP` and `NAILONG_FAILED_TIP` so that a reply is chosen at random from a list; when the corresponding value is an empty list `[]`, images are only detected (or muted/recalled) without a reply being sent
+
### 2.3.2
- Added three frame-handling modes for animated GIFs, selectable via `NAILONG_CHECK_MODE`

View File

@@ -41,11 +41,11 @@ class Config(BaseModel):
    nailong_recall: bool = True
    nailong_mute_seconds: int = 0
-    nailong_tip: Dict[str, str] = {
-        DEFAULT_LABEL: "本群禁止发送奶龙!",
+    nailong_tip: Dict[str, List[str]] = {
+        DEFAULT_LABEL: ["本群禁止发送奶龙!"],
    }
-    nailong_failed_tip: Dict[str, str] = {
-        DEFAULT_LABEL: "{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈",
+    nailong_failed_tip: Dict[str, List[str]] = {
+        DEFAULT_LABEL: ["{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈"],
    }
    nailong_check_all_frames: bool = False
@@ -65,8 +65,8 @@ class Config(BaseModel):
    nailong_model2_online: bool = False
    nailong_check_mode: int = 0
    nailong_similarity_on: bool = False
-    nailong_similarity_max_storage: int = 10
-    nailong_similarity_max_batch_size: int = 10
+    nailong_similarity_max_storage: int = 1000
+    nailong_hf_token: Optional[str] = None
    nailong_github_token: Optional[str] = None
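To make the new semantics concrete, here is a minimal sketch of how a `Dict[str, List[str]]` tip config is consumed. The helper name `pick_tip` is invented for illustration; the real handler (later in this commit) indexes the list with `random.randint`:

```python
import random
from typing import Dict, List, Optional

DEFAULT_LABEL = "nailong"
nailong_tip: Dict[str, List[str]] = {DEFAULT_LABEL: ["本群禁止发送奶龙!"]}

def pick_tip(label: str) -> Optional[str]:
    # Unknown labels fall back to the default label's templates.
    templates = nailong_tip[label] if label in nailong_tip else nailong_tip[DEFAULT_LABEL]
    if not templates:
        return None  # empty list []: act (recall/mute) but send nothing
    return random.choice(templates)

print(pick_tip("nailong"))
```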

View File

@@ -1,3 +1,4 @@
+import random
import re
from typing import Any, Awaitable, Callable, Iterable, List, TypeVar
@@ -104,14 +105,14 @@ async def handle_function(bot: BaseBot, ev: BaseEvent, msg: UniMsg, session: Uni
                frames.append(temp_image)
            except StopIteration:
                break
-        zip_filename = process_gif_and_save_jpgs(frames, label, input_shape)
-        if zip_filename is None:
+        commitInfo = process_gif_and_save_jpgs(frames, label, (224, 224))
+        if commitInfo is None:
            await nailong.finish(
-                f"已保存数据到目录{config.nailong_model_dir}\\records\\{label},标签:{label}",
+                f"The new data has been saved to the directory {config.nailong_model_dir}\\records\\{label}, label: {label}.",
            )
        else:
            await nailong.finish(
-                f"记录数据超过{config.nailong_similarity_max_storage},已清除原记录数据,压缩并保存至{zip_filename}\n已保存数据到目录{config.nailong_model_dir}\\records\\{label},标签:{label}",
+                f"The recorded data has exceeded {config.nailong_similarity_max_storage} items; the original records have been cleared, compressed, and uploaded to {commitInfo.commit_url}\nThe new data has been saved to the directory {config.nailong_model_dir}\\records\\{label}, label: {label}.",
            )
    else:
        try:
@@ -131,9 +132,12 @@ async def handle_function(bot: BaseBot, ev: BaseEvent, msg: UniMsg, session: Uni
            template_dict = (
                config.nailong_tip if punish_ok else config.nailong_failed_tip
            )
-            template_str = template_dict[
+            template_str_all = template_dict[
                check_res.label if (check_res.label in template_dict) else DEFAULT_LABEL
            ]
+            if len(template_str_all) == 0:
+                continue
+            template_str = template_str_all[random.randint(0, len(template_str_all) - 1)]
            mapping = {
                "$event": ev,
                "$target": msg.get_target(),

View File

@@ -37,7 +37,7 @@ SIZE = 224
@run_sync
def check_single(image: np.ndarray, is_gif: bool = False) -> CheckSingleResult[None]:
    if is_gif:
-        res = similarity_process(image, dsize=(SIZE, SIZE))
+        res = similarity_process(image)
        if res is not None:
            return res
        return CheckSingleResult.not_ok(None)

View File

@@ -68,7 +68,7 @@ input_shape = config.nailong_model1_yolox_size or config.nailong_model1_type.yol
@run_sync
def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
    if is_gif:
-        res = similarity_process(frame, dsize=input_shape)
+        res = similarity_process(frame)
        if res is not None:
            return CheckSingleResult(ok=res.ok, label=res.label, extra=frame)
        return CheckSingleResult(ok=False, label=None, extra=frame)

View File

@@ -87,7 +87,7 @@ def _check_single(
    is_gif: bool = False,
) -> CheckSingleResult[Optional[Detections]]:
    if is_gif:
-        res = similarity_process(frame, dsize=input_shape)
+        res = similarity_process(frame)
        if res is not None:
            return res
        return CheckSingleResult.not_ok(None)

View File

@@ -7,13 +7,13 @@ import random
import shutil
from dataclasses import dataclass, field
from typing import Any, Awaitable, Callable, Dict, Generic, Optional, TypeVar

from typing_extensions import TypeAlias

import cv2
import numpy as np
import torch
import torch.nn.functional as F

from ...config import config
from ...frame_source import FrameSource
@@ -21,6 +21,60 @@ T = TypeVar("T")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+if config.nailong_similarity_on:
+    import json
+
+    import faiss
+    import sklearn  # noqa: F401
+    import torchvision
+    from huggingface_hub import PyTorchModelHubMixin
+    from nonebot import logger
+    from torch import nn
+    from torchvision import transforms
+
+    transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5], std=[0.5]),  # assumes grayscale or single-channel input
+    ])
+
+    class MyModel(nn.Module, PyTorchModelHubMixin):
+        def __init__(self):
+            super().__init__()
+            self.resnet = torchvision.models.resnet18(pretrained=False)
+            self.resnet.fc = nn.Linear(self.resnet.fc.in_features, 5)  # output dimension is 5
+
+        def forward(self, x):
+            return self.resnet(x)
+
+    features_model = MyModel.from_pretrained("refoundd/NailongFeatures").to(device)
+    index_path = config.nailong_model_dir / "records.index"
+    json_path = config.nailong_model_dir / "records.json"
+    if os.path.exists(index_path):
+        index = faiss.read_index(str(index_path))
+    else:
+        index = faiss.IndexFlatL2(512)
+    if os.path.exists(json_path):
+        with open(json_path, "r") as f:
+            index_cls = json.load(f)
+    else:
+        index_cls = {}
+    if torch.cuda.is_available():
+        try:
+            res = faiss.StandardGpuResources()  # create GPU resources
+            index = faiss.index_cpu_to_gpu(res, 0, index)  # move the CPU index onto the GPU
+        except Exception:
+            logger.warning("Loading faiss-gpu failed. Please check your GPU device and install faiss-gpu first.")
+
+    def hook(model, input, output):
+        # The input to resnet.fc is the 512-d embedding; search it against the
+        # stored vectors and replace the layer output with (similarity, id, vector).
+        embeddings = input[0]
+        vector = embeddings.detach().cpu().numpy().astype(np.float32)
+        faiss.normalize_L2(vector)
+        d, i = index.search(vector, 1)
+        return 1 - d[0][0], i[0][0], vector
+
+    features_model.resnet.fc.register_forward_hook(hook)
+    features_model.eval()
+
@dataclass
class CheckSingleResult(Generic[T]):
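The subtle piece above is the forward hook: in PyTorch, a forward hook that returns a non-None value replaces the hooked module's output, and since the hook is attached to `resnet.fc` (the network's final layer), calling `features_model(x)` yields the `(similarity, index, vector)` tuple instead of logits. A self-contained toy demonstration of that mechanism (layer size and names invented):

```python
import torch
from torch import nn

layer = nn.Linear(4, 2)

def replace_output(module, inputs, output):
    # Returning a value from a forward hook swaps out the module's output.
    embeddings = inputs[0]  # the tensor fed INTO the layer, as in the hook above
    return embeddings.sum().item(), 0

layer.register_forward_hook(replace_output)
print(layer(torch.ones(1, 4)))  # -> (4.0, 0) rather than a 1x2 logits tensor
```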
@@ -121,113 +175,79 @@ async def race_check(
    return None


-def similarity_process(image1: np.ndarray, dsize) -> Optional[CheckSingleResult]:
-    path = list(glob.glob(os.path.join(config.nailong_model_dir, "records/*/*.jpg")))
-    if len(path) == 0:
-        return None
-    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
+def similarity_process(image1: np.ndarray, dsize=(224, 224), similarity_threshold=1) -> Optional[CheckSingleResult]:
+    # image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    image1 = cv2.resize(image1, dsize, interpolation=cv2.INTER_LINEAR)
-    image1_tensor = (
-        torch.tensor(image1, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
-    )
-    image1_tensor = image1_tensor.reshape(1, -1).to(device)
-    for i in range(0, len(path), config.nailong_similarity_max_batch_size):
-        temp_paths = path[
-            i : (min(len(path), i + config.nailong_similarity_max_batch_size))
-        ]
-        image2s = []
-        for image_path in temp_paths:
-            image2 = cv2.imread(image_path)
-            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
-            image2 = cv2.resize(image2, dsize, interpolation=cv2.INTER_LINEAR)
-            image2s.append(image2)
-        image2_tensor = torch.tensor(np.array(image2s), dtype=torch.float32).permute(
-            0,
-            3,
-            1,
-            2,
-        )
-        image2_tensor = image2_tensor.reshape(image2_tensor.shape[0], -1).to(device)
-        similarities = F.cosine_similarity(image1_tensor, image2_tensor)
-        indices = torch.nonzero(similarities > 0.99)
-        index = indices[0].item() if indices.numel() > 0 else None
-        if index is not None:
-            image_path = path[index]
-            label = os.path.split(image_path)[-2].split("\\")[-1]
-            return CheckSingleResult(ok=True, label=label, extra=None)
+    image1_tensor = transform(image1).unsqueeze(0).to(device)
+    # image1_tensor = (
+    #     torch.tensor(image1, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
+    # ).to(device)
+    distance, indice, _ = features_model(image1_tensor)
+    if distance >= similarity_threshold:
+        label = index_cls[str(indice)]
+        return CheckSingleResult(ok=True, label=label, extra=None)
    return None
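A note on the score above: `faiss.IndexFlatL2.search` returns squared L2 distances, so after `faiss.normalize_L2` an exact duplicate of a stored vector comes back at distance near 0 and the `1 - d` score is near 1; with the default `similarity_threshold=1`, effectively only (near-)exact matches count as hits. A hedged sketch of that flow, assuming the 512-dimensional index used above:

```python
import faiss
import numpy as np

index = faiss.IndexFlatL2(512)           # flat index over raw vectors
vec = np.random.rand(1, 512).astype(np.float32)
faiss.normalize_L2(vec)                  # in-place L2 normalization
index.add(vec)
d, i = index.search(vec, 1)              # d: squared L2 distances, i: vector ids
print(1 - d[0][0], i[0][0])              # ~1.0, 0 for the exact duplicate
```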

-def process_gif_and_save_jpgs(frames, label, dsize, similarity_threshold=0.85):
+def process_gif_and_save_jpgs(frames, label, dsize=(224, 224), similarity_threshold=1):
    if (
        len(
            list(
                glob.glob(
-                    os.path.join(str(config.nailong_model_dir), "records/*/*.jpg"),
+                    str(config.nailong_model_dir / "records/*/*.jpg"),
                ),
            ),
        )
-        >= config.nailong_similarity_max_storage
+        >= config.nailong_similarity_max_storage and config.nailong_hf_token is not None
    ):
        zip_filename = shutil.make_archive(
-            os.path.join(
-                str(config.nailong_model_dir),
-                "{}_records".format(
-                    datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
-                ),
-            ),
+            config.nailong_model_dir / "{}_records".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")),
            "zip",
-            os.path.join(str(config.nailong_model_dir), "records"),
+            config.nailong_model_dir / "records",
        )
-        shutil.rmtree(os.path.join(str(config.nailong_model_dir), "records"))
+        shutil.rmtree(config.nailong_model_dir / "records")
+        from huggingface_hub import HfApi
+
+        api = HfApi()
+        commitInfo = api.upload_file(
+            path_or_fileobj=zip_filename,
+            path_in_repo="new_dataset.zip",
+            repo_id="refoundd/NailongClassification",
+            repo_type="dataset",
+            create_pr=True,
+            token=config.nailong_hf_token,
+        )
+        # os.remove(zip_filename)
    else:
-        zip_filename = None
-    output_dir = os.path.join(str(config.nailong_model_dir), "records", label)
+        commitInfo = None
+    output_dir = config.nailong_model_dir / "records" / label
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
-    frame_count = [i for i in range(len(frames))]
-    while len(frame_count) > 0:
-        frame_num1 = frame_count[0]
-        frame_count.remove(frame_num1)
-        frame1 = frames[frame_num1]
+    count = 0
+    for frame in frames:
        frame_filename = os.path.join(
            output_dir,
            "frame{}_{}.jpg".format(
-                frame_num1,
+                count,
                datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
            ),
        )
        while os.path.exists(frame_filename):
            frame_filename = "exist-" + frame_filename
-        cv2.imwrite(frame_filename, frame1)
-        # frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
-        frame1 = cv2.resize(frame1, dsize)
-        image1_tensor = (
-            torch.tensor(frame1, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
-        )
-        image1_tensor = image1_tensor.reshape(1, -1).to(device)
-        max_length = len(list(frame_count))
-        indexs = []
-        for i in range(0, max_length, config.nailong_similarity_max_batch_size):
-            frame2_num = frame_count[
-                i : (min(max_length, i + config.nailong_similarity_max_batch_size))
-            ]
-            frame2 = [frames[i] for i in frame2_num]
-            # frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
-            frame2 = [cv2.resize(t, dsize) for t in frame2]
-            image2_tensor = torch.tensor(np.array(frame2), dtype=torch.float32).permute(
-                0,
-                3,
-                1,
-                2,
-            )
-            image2_tensor = image2_tensor.reshape(image2_tensor.shape[0], -1).to(device)
-            similarities = F.cosine_similarity(image1_tensor, image2_tensor)
-            indices = torch.nonzero(similarities > similarity_threshold)
-            index = indices.squeeze().tolist() if indices.numel() > 0 else None
-            if type(index) is int:
-                index = [index]
-            if index is not None:
-                indexs.extend([frame2_num[i] for i in index])
-        frame_count = [i for i in frame_count if i not in indexs]
-    return zip_filename
+        cv2.imwrite(frame_filename, frame)
+        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = cv2.resize(frame, dsize, interpolation=cv2.INTER_LINEAR)
+        image1_tensor = transform(frame).unsqueeze(0).to(device)
+        # image1_tensor = (
+        #     torch.tensor(frame, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
+        # ).to(device)
+        d, i, features = features_model(image1_tensor)
+        if d >= similarity_threshold:
+            index_cls[str(i)] = label
+        else:
+            index.add(features)
+            index_cls[str(index.ntotal - 1)] = label
+        count += 1
+    faiss.write_index(index, str(index_path))
+    with open(json_path, "w") as f:
+        json.dump(index_cls, f)
+    return commitInfo
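For reference, the persistence pair written at the end of the function, the binary faiss index plus a JSON map from vector id to label, round-trips like this (a minimal sketch with invented file names in the working directory):

```python
import json

import faiss
import numpy as np

index = faiss.IndexFlatL2(512)
vec = np.random.rand(1, 512).astype(np.float32)
faiss.normalize_L2(vec)
index.add(vec)                                  # this vector gets id 0
index_cls = {str(index.ntotal - 1): "nailong"}  # id -> label, as in the code above

faiss.write_index(index, "records.index")
with open("records.json", "w") as f:
    json.dump(index_cls, f)

reloaded = faiss.read_index("records.index")
d, i = reloaded.search(vec, 1)
print(index_cls[str(i[0][0])])                  # -> "nailong"
```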

View File

@@ -22,6 +22,9 @@ dependencies = [
    "huggingface-hub>=0.26.2",
    "ultralytics>=8.3.31",
    "gradio-client>=1.3.0",
+    "faiss-cpu>=1.9.0.post1",
+    "faiss-gpu>=1.7.2",
+    "scikit-learn>=1.5.2",
]
license = { text = "MIT" }
readme = "README.md"