student_2333 2024-11-21 00:49:22 +08:00
parent b9b54d500e
commit 7d1ee46e09
7 changed files with 93 additions and 128 deletions

View File

@@ -1,8 +1,7 @@
from enum import Enum, auto
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from cookit import StrEnum
from cookit.pyd import field_validator
from nonebot import get_plugin_config
from pydantic import BaseModel, Field
@@ -15,16 +14,12 @@ class ModelType(int, Enum):
TARGET_DETECTION = 1
class Model1Type(StrEnum):
TINY = auto()
M = auto()
@property
def yolox_size(self) -> Tuple[int, int]:
return {
Model1Type.TINY: (416, 416),
Model1Type.M: (640, 640),
}[self]
MODEL1_YOLOX_SIZE_MAP = {
"tiny": (416, 416),
"m": (640, 640),
"m_beta": (640, 640),
}
MODEL1_DEFAULT_TYPE = "tiny"
class Config(BaseModel):
@@ -56,7 +51,7 @@ class Config(BaseModel):
nailong_concurrency: int = 1
nailong_onnx_providers: List[str] = ["CPUExecutionProvider"]
nailong_model1_type: Model1Type = Model1Type.TINY
nailong_model1_type: str = MODEL1_DEFAULT_TYPE
nailong_model1_yolox_size: Optional[Tuple[int, int]] = None
nailong_model1_score: Dict[str, Optional[float]] = {
DEFAULT_LABEL: 0.5,
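
This hunk replaces the Model1Type enum and its yolox_size property with a plain string setting plus a lookup table. A minimal sketch of how the input size can then be resolved, mirroring the fallback chain used later in this commit in the target-detection module (resolve_input_shape and the "nightly" value are illustrative, not part of the plugin):

MODEL1_YOLOX_SIZE_MAP = {
    "tiny": (416, 416),
    "m": (640, 640),
    "m_beta": (640, 640),
}
MODEL1_DEFAULT_TYPE = "tiny"

def resolve_input_shape(model1_type: str, override=None):
    # explicit size override wins, then the map, then the default type's size
    return (
        override
        or MODEL1_YOLOX_SIZE_MAP.get(model1_type)
        or MODEL1_YOLOX_SIZE_MAP[MODEL1_DEFAULT_TYPE]
    )

assert resolve_input_shape("m") == (640, 640)
assert resolve_input_shape("nightly") == (416, 416)  # unknown type falls back to "tiny"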

View File

@@ -6,6 +6,7 @@ from typing import (
Callable,
Dict,
Generic,
Iterable,
Iterator,
Tuple,
Type,
@@ -27,6 +28,14 @@ from PIL import Image as Img, ImageSequence
T = TypeVar("T")
class FrameSaver(ABC, Generic[T]):
@abstractmethod
async def save(self, frames: Iterable[np.ndarray]) -> Segment: ...
# class FrameSource(ABC, Sequence[T], Generic[T]):
# TODO: implement the Sequence methods so that frames can be sampled for detection
#       (drop __iter__ in favor of implementing __len__ and __getitem__)
class FrameSource(ABC, Generic[T]):
def __init__(self, data: T) -> None:
super().__init__()
@@ -35,34 +44,8 @@ class FrameSource(ABC, Generic[T]):
@abstractmethod
def __iter__(self) -> Iterator[np.ndarray]: ...
class PilImageFrameSource(FrameSource[Img.Image]):
def __init__(self, data: Img.Image) -> None:
super().__init__(data)
@classmethod
def from_raw(cls, raw: bytes) -> Self:
return cls(Img.open(BytesIO(raw)))
@override
def __iter__(self) -> Iterator[np.ndarray]:
for frame in ImageSequence.Iterator(self.data):
image_array = np.array(frame.convert("RGB"))
image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
yield image_array
TFS = TypeVar("TFS", bound=FrameSource)
RepackSaver: TypeAlias = Callable[[TFS, Iterator[np.ndarray]], Awaitable[Segment]]
repack_savers: Dict[Type[FrameSource], RepackSaver] = {}
def repack_saver(t: Type[TFS]):
def deco(func: RepackSaver[TFS]):
repack_savers[t] = func
return func
return deco
@abstractmethod
def get_saver(self) -> FrameSaver[T]: ...
# https://github.com/MeetWq/meme-generator/blob/main/meme_generator/utils.py#L60
@@ -103,6 +86,23 @@ def save_gif(frames: list[Img.Image], duration: float) -> BytesIO:
return save_gif(new_frames, duration)
class PilImageFrameSaver(FrameSaver[Img.Image]):
def __init__(self, duration: float) -> None:
super().__init__()
self.duration = duration
async def save(self, frames: Iterable[np.ndarray]) -> Segment:
frame_images = [
Img.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames
]
if len(frame_images) == 1:
bio = BytesIO()
frame_images[0].save(bio, format="PNG")
else:
bio = save_gif(frame_images, self.duration)
return Image(raw=bio)
# https://github.com/MeetWq/meme-generator/blob/main/meme_generator/utils.py#L97
def get_avg_duration(image: Img.Image) -> float:
if not getattr(image, "is_animated", False):
@@ -114,26 +114,24 @@ def get_avg_duration(image: Img.Image) -> float:
return total_duration / len(frames) / 1000
@repack_saver(PilImageFrameSource)
async def _(source: PilImageFrameSource, frames: Iterator[np.ndarray]) -> Segment:
frame_images = [
Img.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames
]
if len(frame_images) == 1:
bio = BytesIO()
frame_images[0].save(bio, format="PNG")
else:
bio = save_gif(frame_images, get_avg_duration(source.data))
return Image(raw=bio)
class PilImageFrameSource(FrameSource[Img.Image]):
def __init__(self, data: Img.Image) -> None:
super().__init__(data)
@classmethod
def from_raw(cls, raw: bytes) -> Self:
return cls(Img.open(BytesIO(raw)))
def repack_save(
source: FrameSource,
frames: Iterator[np.ndarray],
) -> Awaitable[Segment]:
if (k := type(source)) not in repack_savers:
raise NotImplementedError
return repack_savers[k](source, frames)
@override
def __iter__(self) -> Iterator[np.ndarray]:
for frame in ImageSequence.Iterator(self.data):
image_array = np.array(frame.convert("RGB"))
image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
yield image_array
@override
def get_saver(self):
return PilImageFrameSaver(get_avg_duration(self.data))
TS = TypeVar("TS", bound=Segment)
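
With this change the decorator-based repack_saver registry is gone: a FrameSource now hands out its own FrameSaver via get_saver(), and save() packs checked frames back into a message Segment (a single frame becomes a PNG, several frames become a GIF using the source's average frame duration). A usage sketch, assuming the module path nonebot_plugin_nailongremove.frame_source and raw image bytes supplied by the caller:

from nonebot_plugin_nailongremove.frame_source import PilImageFrameSource  # assumed import path

async def detect_and_repack(raw: bytes):
    source = PilImageFrameSource.from_raw(raw)   # decode with PIL
    frames = (frame for frame in source)         # frames as BGR numpy arrays
    # single frame -> PNG segment, multiple frames -> GIF segment
    return await source.get_saver().save(frames)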

View File

@@ -1,38 +1,16 @@
from typing import Awaitable, Callable, NoReturn
from typing import Awaitable, Callable
from ..config import ModelType, config
from ..frame_source import FrameSource
from .utils.common import CheckResult as CheckResult
def raise_extra_import_error(e: BaseException, group: str) -> NoReturn:
raise ImportError(
f"Possibly missing required libraries, "
f"Please run `pip install nonebot-plugin-nailongremove[{group}]` "
f"in your project's environment to install.",
) from e
check: Callable[[FrameSource], Awaitable[CheckResult]]
if config.nailong_model is ModelType.CLASSIFICATION:
try:
from .classification import check as check
except ImportError as e:
raise_extra_import_error(e, "model0")
from .classification import check as check
elif config.nailong_model is ModelType.TARGET_DETECTION:
try:
from .target_detection import check as check
except ImportError as e:
raise ImportError(
"To avoid dependency issues, please install onnxruntime manually.\n"
"If you have a compatible GPU, "
"please run `pip install onnxruntime-gpu` in your project's environment, "
"then edit plugin's `NAILONG_ONNX_PROVIDERS` config to use it;\n"
"Otherwise run `pip install onnxruntime` in your project's environment "
"and use CPU to compute.",
) from e
from .target_detection import check as check
else:
raise NotImplementedError # never reach here

View File

@@ -1,11 +1,12 @@
from typing import Any
from typing import cast
import cv2
import numpy as np
import torch
from nonebot.utils import run_sync
from torch import nn
from torch import Tensor, nn
from torchvision import transforms
from torchvision.models import ResNet
from ..config import DEFAULT_LABEL
from ..frame_source import FrameSource
@@ -22,7 +23,7 @@ model_path = GitHubRepoModelUpdater(
cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if cuda_available else "cpu")
transform = transforms.Compose([transforms.ToTensor()])
model: Any = torch.hub.load("pytorch/vision:v0.10.0", "resnet50", weights=None)
model = cast(ResNet, torch.hub.load("pytorch/vision:v0.10.0", "resnet50", weights=None))
model.fc = nn.Linear(model.fc.in_features, 2)  # replace the final layer with a 2-class classification head
model.load_state_dict(
torch.load(model_path, weights_only=True, map_location=device),
@@ -37,17 +38,20 @@ SIZE = 224
@run_sync
def check_single(image: np.ndarray) -> CheckSingleResult[None]:
if image.shape[0] < SIZE or image.shape[1] < SIZE:
return CheckSingleResult.not_ok(None)
return CheckSingleResult()
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (SIZE, SIZE))
image = transform(image)
image = image.unsqueeze(0) # type: ignore
tensor = cast(Tensor, transform(image))
tensor = tensor.unsqueeze(0)
with torch.no_grad():
output = model(image.to(device)) # type: ignore
output: Tensor = model(tensor.to(device))
_, pred = torch.max(output, 1)
return CheckSingleResult(ok=pred.item() == 1, label=DEFAULT_LABEL, extra=None)
return CheckSingleResult(
label=DEFAULT_LABEL if pred.item() == 1 else None,
extra=None,
)
async def check(source: FrameSource):
async def check(source: FrameSource) -> CheckResult:
res = await race_check(check_single, source)
return CheckResult(ok=bool(res), label=res.label if res else None)
return CheckResult(label=res.label if res else None)
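
CheckSingleResult and CheckResult no longer carry an ok flag (see the utils.common changes further down in this commit); a non-None label is what signals a positive detection. A sketch of how a caller reads the new result shape, assuming check and FrameSource are in scope from this module and the frame_source module:

async def is_detected(source: FrameSource) -> bool:
    res = await check(source)        # CheckResult(label=..., extra_vars={})
    return res.label is not None     # True when any frame matched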

View File

@@ -11,13 +11,13 @@ import numpy as np
from cookit import with_semaphore
from nonebot.utils import run_sync
from ..config import config
from ..frame_source import FrameSource, repack_save
from ..config import MODEL1_DEFAULT_TYPE, MODEL1_YOLOX_SIZE_MAP, config
from ..frame_source import FrameSource
from .utils.common import CheckResult, CheckSingleResult, race_check
from .utils.update import GitHubLatestReleaseModelUpdater, ModelInfo, UpdaterGroup
from .utils.yolox import demo_postprocess, multiclass_nms, preprocess, vis
model_filename_sfx = f"_{config.nailong_model1_type.value}.onnx"
model_filename_sfx = f"_{config.nailong_model1_type}.onnx"
class ModelUpdater(GitHubLatestReleaseModelUpdater):
@@ -49,7 +49,11 @@ session = onnxruntime.InferenceSession(
model_path,
providers=config.nailong_onnx_providers,
)
input_shape = config.nailong_model1_yolox_size or config.nailong_model1_type.yolox_size
input_shape = (
config.nailong_model1_yolox_size
or MODEL1_YOLOX_SIZE_MAP.get(config.nailong_model1_type)
or MODEL1_YOLOX_SIZE_MAP[MODEL1_DEFAULT_TYPE]
)
@dataclass
@@ -97,7 +101,7 @@ def _check_single(frame: np.ndarray) -> CheckSingleResult[Optional[Detections]]:
boxes_xyxy /= ratio
dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
if dets is None:
return CheckSingleResult.not_ok(None)
return CheckSingleResult()
final_boxes, final_scores, final_cls_ids = (
dets[:, :4], # type: ignore
@@ -109,17 +113,15 @@ def _check_single(frame: np.ndarray) -> CheckSingleResult[Optional[Detections]]:
expected = config.nailong_model1_score.get(label)
if (expected is not None) and s >= expected:
return CheckSingleResult(
ok=True,
label=label,
extra=Detections(final_boxes, final_scores, final_cls_ids),
)
return CheckSingleResult.not_ok(None)
return CheckSingleResult()
async def check_single(frame: np.ndarray) -> CheckSingleResult[FrameInfo]:
res = await _check_single(frame)
return CheckSingleResult(
ok=res.ok,
label=res.label,
extra=FrameInfo(frame, res.extra),
)
@@ -133,15 +135,14 @@ async def check(source: FrameSource) -> CheckResult:
results = await asyncio.gather(
*(with_semaphore(sem)(check_single)(frame) for frame in source),
)
ok = any(r.ok for r in results)
ok = any(r.label for r in results)
if ok:
all_labels = {r.label for r in results if r.label}
label = next(
(x for x in config.nailong_model1_score if x in all_labels),
None,
)
extra_vars["$checked_result"] = await repack_save(
source,
extra_vars["$checked_result"] = await source.get_saver().save(
(r.extra.vis() for r in results),
)
else:
@@ -149,8 +150,7 @@ async def check(source: FrameSource) -> CheckResult:
ok = bool(res)
if res:
label = res.label
extra_vars["$checked_result"] = await repack_save(
source,
iter((res.extra.vis(),)),
extra_vars["$checked_result"] = await source.get_saver().save(
(res.extra.vis(),),
)
return CheckResult(ok, label, extra_vars)
return CheckResult(label, extra_vars)
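
A worked example of the label selection above: the reported label is the first key of nailong_model1_score (insertion order acts as the priority) that actually appeared among the per-frame results. The label names below are hypothetical, not the plugin's real labels:

nailong_model1_score = {"nailong": 0.5, "other_target": 0.75}  # hypothetical config
all_labels = {"other_target"}                                  # labels found in the frames
label = next((x for x in nailong_model1_score if x in all_labels), None)
assert label == "other_target"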

View File

@@ -13,25 +13,15 @@ T = TypeVar("T")
@dataclass
class CheckSingleResult(Generic[T]):
ok: bool
label: Optional[str]
extra: T
@classmethod
def not_ok(cls, extra: T):
return cls(ok=False, label=None, extra=extra)
label: Optional[str] = None
extra: T = None # type: ignore
@dataclass
class CheckResult:
ok: bool
label: Optional[str]
label: Optional[str] = None
extra_vars: Dict[str, Any] = field(default_factory=dict)
@classmethod
def not_ok(cls):
return cls(ok=False, label=None, extra_vars={})
FrameChecker: TypeAlias = Callable[[np.ndarray], Awaitable[CheckSingleResult[T]]]
@@ -43,14 +33,14 @@ async def race_check(
) -> Optional[CheckSingleResult[T]]:
iterator = iter(frames)
async def worker() -> CheckSingleResult:
async def worker() -> Optional[CheckSingleResult[T]]:
while True:
try:
frame = next(iterator)
except StopIteration:
return CheckSingleResult.not_ok(None)
return None
res = await checker(frame)
if res.ok:
if res.label:
return res
tasks = [asyncio.create_task(worker()) for _ in range(concurrency)]
@@ -59,7 +49,7 @@ async def race_check(
break
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
for t in done:
if (res := t.result()).ok:
if (res := t.result()) and res.label:
for pt in pending:
pt.cancel()
return res
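
A standalone illustration of the racing pattern race_check uses: several workers pull frames from one shared iterator, the first positive result cancels the remaining workers, and exhausting the iterator without a hit yields None. The toy checker and integer frames below are stand-ins, not the plugin's code:

import asyncio
from typing import Iterator, Optional

async def race(frames: Iterator[int], concurrency: int = 2) -> Optional[int]:
    iterator = iter(frames)

    async def worker() -> Optional[int]:
        while True:
            try:
                frame = next(iterator)
            except StopIteration:
                return None
            await asyncio.sleep(0)      # stand-in for an async checker call
            if frame % 7 == 0:          # toy "positive" condition
                return frame

    tasks = [asyncio.create_task(worker()) for _ in range(concurrency)]
    while tasks:
        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        for t in done:
            if (res := t.result()) is not None:
                for pt in pending:
                    pt.cancel()
                return res
        tasks = list(pending)
    return None

print(asyncio.run(race(iter(range(1, 20)))))  # -> 7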

View File

@@ -193,7 +193,7 @@ class ModelUpdater(ABC):
f"Failed to get model info in {type(self).__name__}, skipping update: "
f"{type(e).__name__}: {e}",
)
logger.debug("Stacktrace")
logger.opt(exception=e).debug("Stacktrace")
return local
model_path = self.get_path(info.filename)
@@ -222,7 +222,7 @@ class ModelUpdater(ABC):
logger.error(
f"Failed to update model, skipping: {type(e).__name__}: {e}",
)
logger.debug("Stacktrace")
logger.opt(exception=e).debug("Stacktrace")
return local
else:
self.save_local_ver(info)
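
The logging change swaps a bare logger.debug("Stacktrace") for loguru's opt(exception=e), which attaches the caught exception's formatted traceback to the record instead of logging just the word. A minimal illustration of the pattern (not part of the commit):

from loguru import logger

try:
    1 / 0
except ZeroDivisionError as e:
    logger.error(f"Failed to update model, skipping: {type(e).__name__}: {e}")
    logger.opt(exception=e).debug("Stacktrace")  # debug entry now includes the traceback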