student_2333 2024-11-06 01:50:29 +08:00
parent e65f3ada15
commit 3291728604
GPG Key ID: 665F083BEC56F2A6
7 changed files with 355 additions and 124 deletions

View File

@@ -25,6 +25,7 @@ class Config(BaseModel):
     nailong_mute_seconds: int = 0
     nailong_tip: str = "本群禁止发送奶龙!"
     nailong_failed_tip: str = "{:Reply($message_id)}呜,不要发奶龙了嘛 🥺 👉👈"
+    nailong_checked_result_all: bool = False
     nailong_model_dir: Path = Field(
         default_factory=lambda: Path.cwd() / "data" / "nailongremove",
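The new `nailong_checked_result_all` switch defaults to off. Like the plugin's other options it should be settable from NoneBot's env config; a minimal sketch, assuming a standard `.env` setup (the key names and values here are illustrative, relying on pydantic's case-insensitive env mapping):

    # .env — hypothetical deployment snippet
    NAILONG_CHECKED_RESULT_ALL=true  # render detection boxes on every frame, not just the first hit
    NAILONG_CONCURRENCY=4            # existing option: how many frames are checked in parallel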

View File

@@ -0,0 +1,193 @@
+from abc import ABC, abstractmethod
+from io import BytesIO
+from typing import (
+    AsyncIterator,
+    Awaitable,
+    Callable,
+    Dict,
+    Generic,
+    Iterator,
+    Tuple,
+    Type,
+    TypeVar,
+    cast,
+)
+from typing_extensions import Self, TypeAlias, override
+
+import cv2
+import numpy as np
+from nonebot import logger
+from nonebot.drivers import Request
+from nonebot.matcher import current_bot, current_event, current_matcher
+from nonebot_plugin_alconna.builtins.uniseg.market_face import MarketFace
+from nonebot_plugin_alconna.uniseg import Image, Segment, UniMessage
+from nonebot_plugin_alconna.uniseg.tools import image_fetch
+from PIL import Image as Img, ImageSequence
+
+T = TypeVar("T")
+
+
+class FrameSource(ABC, Generic[T]):
+    def __init__(self, data: T) -> None:
+        super().__init__()
+        self.data = data
+
+    @abstractmethod
+    def __iter__(self) -> Iterator[np.ndarray]: ...
+
+
+class PilImageFrameSource(FrameSource[Img.Image]):
+    def __init__(self, data: Img.Image) -> None:
+        super().__init__(data)
+
+    @classmethod
+    def from_raw(cls, raw: bytes) -> Self:
+        return cls(Img.open(BytesIO(raw)))
+
+    @override
+    def __iter__(self) -> Iterator[np.ndarray]:
+        for frame in ImageSequence.Iterator(self.data):
+            image_array = np.array(frame.convert("RGB"))
+            image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
+            yield image_array
+
+
+TFS = TypeVar("TFS", bound=FrameSource)
+RepackSaver: TypeAlias = Callable[[TFS, Iterator[np.ndarray]], Awaitable[Segment]]
+repack_savers: Dict[Type[FrameSource], RepackSaver] = {}
+
+
+def repack_saver(t: Type[TFS]):
+    def deco(func: RepackSaver[TFS]):
+        repack_savers[t] = func
+        return func
+
+    return deco
+
+
+# https://github.com/MeetWq/meme-generator/blob/main/meme_generator/utils.py#L60
+def save_gif(frames: list[Img.Image], duration: float) -> BytesIO:
+    output = BytesIO()
+    frames[0].save(
+        output,
+        format="GIF",
+        save_all=True,
+        append_images=frames[1:],
+        duration=duration * 1000,
+        loop=0,
+        disposal=2,
+        optimize=False,
+    )
+
+    # Within the size limit: return directly
+    nbytes = output.getbuffer().nbytes
+    if nbytes <= 10000000:  # meme_config.gif.gif_max_size * 10**6
+        return output
+
+    # Over the size limit and over the frame cap: reduce the frame count
+    n_frames = len(frames)
+    gif_max_frames = 100  # meme_config.gif.gif_max_frames
+    if n_frames > gif_max_frames:
+        index = range(n_frames)
+        ratio = n_frames / gif_max_frames
+        index = (int(i * ratio) for i in range(gif_max_frames))
+        new_duration = duration * ratio
+        new_frames = [frames[i] for i in index]
+        return save_gif(new_frames, new_duration)
+
+    # Over the size limit but within the frame cap: shrink the dimensions
+    new_frames = [
+        frame.resize((int(frame.width * 0.9), int(frame.height * 0.9)))
+        for frame in frames
+    ]
+    return save_gif(new_frames, duration)
+
+
+# https://github.com/MeetWq/meme-generator/blob/main/meme_generator/utils.py#L97
+def get_avg_duration(image: Img.Image) -> float:
+    if not getattr(image, "is_animated", False):
+        return 0
+    total_duration = 0
+    n_frames = getattr(image, "n_frames", 1)
+    for i in range(n_frames):
+        image.seek(i)
+        total_duration += image.info.get("duration", 20)
+    return total_duration / n_frames / 1000
+
+
+@repack_saver(PilImageFrameSource)
+async def _(source: PilImageFrameSource, frames: Iterator[np.ndarray]) -> Segment:
+    frame_images = [
+        Img.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames
+    ]
+    if len(frame_images) == 1:
+        bio = BytesIO()
+        frame_images[0].save(bio, format="PNG")
+    else:
+        bio = save_gif(frame_images, get_avg_duration(source.data))
+    return Image(raw=bio)
+
+
+def repack_save(
+    source: FrameSource,
+    frames: Iterator[np.ndarray],
+) -> Awaitable[Segment]:
+    if (k := type(source)) not in repack_savers:
+        raise NotImplementedError
+    return repack_savers[k](source, frames)
+
+
+TS = TypeVar("TS", bound=Segment)
+SourceExtractor: TypeAlias = Callable[[TS], Awaitable[FrameSource]]
+source_extractors: Dict[Type[Segment], SourceExtractor] = {}
+
+
+def source_extractor(t: Type[TS]):
+    def deco(func: SourceExtractor[TS]):
+        source_extractors[t] = func
+        return func
+
+    return deco
+
+
+@source_extractor(Image)
+async def _(seg: Image):
+    image = await image_fetch(
+        current_event.get(),
+        current_bot.get(),
+        current_matcher.get().state,
+        seg,
+    )
+    if not image:
+        raise RuntimeError("Cannot fetch image")
+    return PilImageFrameSource.from_raw(image)
+
+
+@source_extractor(MarketFace)
+async def _(seg: MarketFace):
+    url = (
+        f"https://gxh.vip.qq.com/club/item/parcel/item/{seg.id[:2]}/{seg.id}/raw300.gif"
+    )
+    req = Request("GET", url)
+    resp = await current_bot.get().adapter.request(req)
+    image = cast(bytes, resp.content)
+    return PilImageFrameSource.from_raw(image)
+
+
+async def extract_source(seg: Segment) -> FrameSource:
+    if (k := type(seg)) not in source_extractors:
+        raise NotImplementedError
+    return await source_extractors[k](seg)
+
+
+async def iter_frames_in_message(
+    message: UniMessage,
+) -> AsyncIterator[Tuple[FrameSource, Segment]]:
+    for seg in message:
+        try:
+            yield await extract_source(seg), seg
+        except NotImplementedError:
+            continue
+        except Exception as e:
+            logger.warning(f"Failed to process {seg!r}: {type(e).__name__}: {e}")
+            logger.opt(exception=e).debug("Stacktrace")
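This new module ties two registries together: `source_extractor` maps a message segment type to a `FrameSource`, and `repack_saver` maps a `FrameSource` type back to a sendable segment. A minimal consumption sketch (assuming it runs inside a matcher handler, since the extractors read the `current_bot`/`current_event`/`current_matcher` context vars):

    async for source, seg in iter_frames_in_message(msg):
        for frame in source:  # BGR ndarrays, one per frame (animated images yield several)
            h, w = frame.shape[:2]
            logger.debug(f"{type(seg).__name__}: got a {w}x{h} frame")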

View File

@@ -1,35 +1,22 @@
-import asyncio
-import io
-from typing import Any, Awaitable, Callable, Iterable, Iterator, List, TypeVar, cast
+from typing import Any, Awaitable, Callable, Iterable, List, TypeVar
-import cv2
-import numpy as np
 from nonebot import logger, on_message
 from nonebot.adapters import Bot as BaseBot, Event as BaseEvent
-from nonebot.drivers import Request
 from nonebot.permission import SUPERUSER
 from nonebot.rule import Rule
-from nonebot.typing import T_State
-from nonebot_plugin_alconna.builtins.uniseg.market_face import MarketFace
-from nonebot_plugin_alconna.uniseg import Image, UniMessage, UniMsg, image_fetch
+from nonebot_plugin_alconna.uniseg import Image, UniMessage, UniMsg
 from nonebot_plugin_uninfo import QryItrface, Uninfo
-from PIL import Image as PilImage, ImageSequence
+from nonebot_plugin_nailongremove.frame_source import iter_frames_in_message
 from .config import config
-from .model import CheckResultTuple, check_image
+from .model import check
 from .uniapi import mute, recall
 
 T = TypeVar("T")
 
-
-def transform_image(image_data: bytes) -> Iterator[np.ndarray]:
-    image = PilImage.open(io.BytesIO(image_data))
-    for frame in ImageSequence.Iterator(image):
-        image_array = np.array(frame.convert("RGB"))
-        image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
-        yield image_array
-
-
 def judge_list(lst: Iterable[T], val: T, blacklist: bool) -> bool:
     return (val not in lst) if blacklist else (val in lst)
@@ -92,72 +79,18 @@ async def nailong_rule(
 )
 
-
-async def check_frames(frames: Iterator[np.ndarray]) -> CheckResultTuple:
-    async def worker() -> CheckResultTuple:
-        while True:
-            try:
-                frame = next(frames)
-            except StopIteration:
-                return False, None
-            res = await check_image(frame)
-            if not isinstance(res, tuple):
-                res = res, None
-            if res[0]:
-                return res
-
-    tasks = [asyncio.create_task(worker()) for _ in range(config.nailong_concurrency)]
-    while True:
-        if not tasks:
-            break
-        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
-        for t in done:
-            if (res := t.result())[0]:
-                for pt in pending:
-                    pt.cancel()
-                return res
-        tasks = pending
-    return False, None
-
-
 nailong = on_message(rule=Rule(nailong_rule), priority=config.nailong_priority)
 
 
 @nailong.handle()
-async def handle_function(
-    bot: BaseBot,
-    ev: BaseEvent,
-    msg: UniMsg,
-    session: Uninfo,
-    state: T_State,
-):
-    for seg in msg:
-        if isinstance(seg, Image):
-            image = await image_fetch(ev, bot, state, seg)
-            if not image:
-                logger.warning(f"Failed to fetch image: {seg!r}")
-                continue
-        elif isinstance(seg, MarketFace):
-            url = f"https://gxh.vip.qq.com/club/item/parcel/item/{seg.id[:2]}/{seg.id}/raw300.gif"
-            req = Request("GET", url)
-            try:
-                resp = await bot.adapter.request(req)
-            except Exception as e:
-                logger.warning(f"Failed to fetch {seg!r}: {type(e).__name__}: {e}")
-                continue
-            image = cast(bytes, resp.content)
-        else:
-            continue
+async def handle_function(bot: BaseBot, ev: BaseEvent, msg: UniMsg, session: Uninfo):
+    async for frames, seg in iter_frames_in_message(msg):
         try:
-            frames = transform_image(image)
-            check_ok, checked_image = await check_frames(frames)
+            check_res = await check(frames)
         except Exception:
-            logger.exception(f"Failed to process image: {seg!r}")
+            logger.exception(f"Failed to check {seg!r}")
             continue
-        if not check_ok:
+        if not check_res.ok:
             continue
 
         functions: List[Callable[[], Awaitable[Any]]] = []
@@ -174,10 +107,6 @@ async def handle_function(
             "$message_id": msg.get_message_id(),
             "$msg": msg,
             "$ss": session,
+            **check_res.extra_vars,
         }
-        if checked_image is not None:
-            bio = io.BytesIO()
-            img = PilImage.fromarray(cv2.cvtColor(checked_image, cv2.COLOR_BGR2RGB))
-            img.save(bio, "PNG")
-            mapping["$checked_image"] = bio.getvalue()
         await UniMessage.template(template_str).format_map(mapping).finish()
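With `**check_res.extra_vars` merged into the mapping, a model backend can surface extra template placeholders without the handler knowing about them. An illustrative sketch of the new contract (names taken from this diff, values made up):

    # Sketch only: CheckResult carries ok plus optional template variables.
    res = await check(frames)          # CheckResult(ok=..., extra_vars={...})
    if res.ok:
        mapping = {"$message_id": msg.get_message_id(), **res.extra_vars}
        # "$checked_result" (an alconna Segment) appears here when the
        # target-detection model rendered its boxes via repack_save.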

View File

@@ -1,10 +1,8 @@
-from typing import Awaitable, Callable, Literal, NoReturn, Tuple, Union
-from typing_extensions import TypeAlias
-import numpy as np
-from nonebot.utils import run_sync
+from typing import Awaitable, Callable, NoReturn
 
 from ..config import ModelType, config
+from ..frame_source import FrameSource
+from .common import CheckResult as CheckResult
 
 
 def raise_extra_import_error(e: BaseException, group: str) -> NoReturn:
@@ -15,25 +13,16 @@ def raise_extra_import_error(e: BaseException, group: str) -> NoReturn:
     ) from e
 
 
-CheckResultTuple: TypeAlias = Union[
-    Tuple[bool, None],
-    Tuple[Literal[True], np.ndarray],
-]
-CheckResult: TypeAlias = Union[bool, CheckResultTuple]
-check_image_sync: Callable[[np.ndarray], CheckResult]
+check: Callable[[FrameSource], Awaitable[CheckResult]]
 
 if config.nailong_model is ModelType.CLASSIFICATION:
-    from .classification import check_image as check_image_sync
+    from .classification import check as check
 elif config.nailong_model is ModelType.TARGET_DETECTION:
     try:
-        from .target_detection import check_image as check_image_sync
+        from .target_detection import check as check
     except ImportError as e:
         raise_extra_import_error(e, "model1")
 else:
     raise ValueError("Invalid model type")
-
-check_image: Callable[[np.ndarray], Awaitable[CheckResult]] = run_sync(check_image_sync)
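After this change, both backends export the same coroutine signature, `check(frames: FrameSource) -> CheckResult`, so callers no longer deal with `run_sync` or per-frame functions. A caller-side sketch:

    from nonebot_plugin_nailongremove.model import CheckResult, check

    async def scan(source) -> bool:  # source: any FrameSource
        res: CheckResult = await check(source)
        return res.ok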

View File

@@ -1,16 +1,16 @@
-from typing import TYPE_CHECKING, Any
+from typing import Any
 
 import cv2
 import numpy as np
 import torch
+from nonebot.utils import run_sync
 from torch import nn
 from torchvision import transforms
 
+from ..frame_source import FrameSource
+from .common import CheckResult, CheckSingleResult, race_check
 from .update import GitHubRepoModelUpdater
 
-if TYPE_CHECKING:
-    from . import CheckResult
 
 model_path = GitHubRepoModelUpdater(
     "spawner1145",
     "NailongRecognize",
@@ -33,9 +33,10 @@ if cuda_available:
 SIZE = 224
 
 
-def check_image(image: np.ndarray) -> "CheckResult":
+@run_sync
+def check_single(image: np.ndarray) -> CheckSingleResult[None]:
     if image.shape[0] < SIZE or image.shape[1] < SIZE:
-        return False
+        return CheckSingleResult(ok=False, extra=None)
     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     image = cv2.resize(image, (SIZE, SIZE))
     image = transform(image)
@@ -43,4 +44,9 @@ def check_image(image: np.ndarray) -> "CheckResult":
     with torch.no_grad():
         output = model(image.to(device))  # type: ignore
         _, pred = torch.max(output, 1)
-    return pred.item() == 1
+    return CheckSingleResult(ok=pred.item() == 1, extra=None)
+
+
+async def check(frames: FrameSource):
+    res = await race_check(check_single, frames)
+    return CheckResult(ok=bool(res))
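The classification backend has no detections to render, so its `check` simply races the per-frame classifier and collapses the outcome to a bare flag. A standalone usage sketch (assuming the plugin's config can be loaded in your environment, which normally requires a configured bot, and a hypothetical local file):

    import asyncio

    from nonebot_plugin_nailongremove.frame_source import PilImageFrameSource
    from nonebot_plugin_nailongremove.model.classification import check

    async def main() -> None:
        with open("sample.gif", "rb") as f:  # hypothetical input
            res = await check(PilImageFrameSource.from_raw(f.read()))
        print(res.ok)

    asyncio.run(main())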

View File

@@ -0,0 +1,58 @@
+import asyncio
+from dataclasses import dataclass, field
+from typing import Any, Awaitable, Callable, Dict, Generic, Optional, TypeVar
+from typing_extensions import TypeAlias
+
+import numpy as np
+
+from ..config import config
+from ..frame_source import FrameSource
+
+T = TypeVar("T")
+
+
+@dataclass
+class CheckSingleResult(Generic[T]):
+    ok: bool
+    extra: T
+
+
+@dataclass
+class CheckResult:
+    ok: bool
+    extra_vars: Dict[str, Any] = field(default_factory=dict)
+
+
+FrameChecker: TypeAlias = Callable[[np.ndarray], Awaitable[CheckSingleResult[T]]]
+
+
+async def race_check(
+    checker: FrameChecker[T],
+    frames: FrameSource,
+    concurrency: int = config.nailong_concurrency,
+) -> Optional[CheckSingleResult[T]]:
+    iterator = iter(frames)
+
+    async def worker() -> CheckSingleResult:
+        while True:
+            try:
+                frame = next(iterator)
+            except StopIteration:
+                return CheckSingleResult(ok=False, extra=None)
+            res = await checker(frame)
+            if res.ok:
+                return res
+
+    tasks = [asyncio.create_task(worker()) for _ in range(concurrency)]
+    while True:
+        if not tasks:
+            break
+        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
+        for t in done:
+            if (res := t.result()).ok:
+                for pt in pending:
+                    pt.cancel()
+                return res
+        tasks = pending
+    return None
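`race_check` is the generalized form of the old handler-local `check_frames`: `concurrency` workers pull from one shared frame iterator, the first `ok` result cancels the rest, and `None` means no frame matched. Any `FrameChecker` works; a toy example (the brightness predicate is hypothetical, not the real model):

    async def bright_frame(frame: np.ndarray) -> CheckSingleResult[None]:
        return CheckSingleResult(ok=frame.mean() > 127, extra=None)

    # res = await race_check(bright_frame, source)  # source: any FrameSource
    # if res is not None: some frame matched, and res.extra holds its payload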

View File

@@ -1,16 +1,19 @@
-from typing import TYPE_CHECKING
+import asyncio
+from dataclasses import dataclass
+from typing import Optional
+from typing_extensions import override
 
 import numpy as np
 import onnxruntime
+from cookit import with_semaphore
+from nonebot.utils import run_sync
 
 from ..config import config
+from ..frame_source import FrameSource, repack_save
+from .common import CheckResult, CheckSingleResult, race_check
 from .update import GitHubLatestReleaseModelUpdater, ModelInfo, UpdaterGroup
 from .yolox_utils import demo_postprocess, multiclass_nms, preprocess, vis
 
-if TYPE_CHECKING:
-    from . import CheckResult
 
 model_filename_sfx = f"_{config.nailong_model1_type}.onnx"
@@ -54,8 +57,36 @@ session = onnxruntime.InferenceSession(
 
 input_shape = config.nailong_model1_yolox_size
 
 
-def check_image(image: np.ndarray) -> "CheckResult":
-    img, ratio = preprocess(image, input_shape)
+@dataclass
+class Detections:
+    boxes: np.ndarray
+    scores: np.ndarray
+    ids: np.ndarray
+
+
+@dataclass
+class FrameInfo:
+    frame: np.ndarray
+    detections: Optional[Detections] = None
+
+    def vis(self) -> np.ndarray:
+        return (
+            vis(
+                self.frame,
+                self.detections.boxes,
+                self.detections.scores,
+                self.detections.ids,
+                conf=0.3,
+                class_names=labels,
+            )
+            if self.detections
+            else self.frame
+        )
+
+
+@run_sync
+def _check_single(frame: np.ndarray) -> CheckSingleResult[Optional[Detections]]:
+    img, ratio = preprocess(frame, input_shape)
     ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
     output = session.run(None, ort_inputs)
     predictions = demo_postprocess(output[0], input_shape)[0]
@@ -71,26 +102,50 @@ def check_image(image: np.ndarray) -> "CheckResult":
     boxes_xyxy /= ratio
     dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
     if dets is None:
-        return False
+        return CheckSingleResult(ok=False, extra=None)
-    final_boxes, final_scores, final_cls_inds = (
+    final_boxes, final_scores, final_cls_ids = (
         dets[:, :4],  # type: ignore
         dets[:, 4],  # type: ignore
         dets[:, 5],  # type: ignore
     )
     has = any(
         True
-        for c, s in zip(final_cls_inds, final_scores)
+        for c, s in zip(final_cls_ids, final_scores)
         if labels[int(c)] == "nailong" and s >= config.nailong_model1_score
     )
     if has:
-        image = vis(
-            image,
-            final_boxes,
-            final_scores,
-            final_cls_inds,
-            conf=0.3,
-            class_names=labels,
+        return CheckSingleResult(
+            ok=True,
+            extra=Detections(final_boxes, final_scores, final_cls_ids),
         )
-        return True, image
-    return False
+    return CheckSingleResult(ok=False, extra=None)
+
+
+async def check_single(frame: np.ndarray) -> CheckSingleResult[FrameInfo]:
+    res = await _check_single(frame)
+    return CheckSingleResult(ok=res.ok, extra=FrameInfo(frame, res.extra))
+
+
+async def check(frames: FrameSource) -> CheckResult:
+    extra_vars = {}
+    if config.nailong_checked_result_all:
+        sem = asyncio.Semaphore(config.nailong_concurrency)
+        results = await asyncio.gather(
+            *(with_semaphore(sem)(check_single)(frame) for frame in frames),
+        )
+        ok = any(r.ok for r in results)
+        if ok:
+            extra_vars["$checked_result"] = await repack_save(
+                frames,
+                (r.extra.vis() for r in results),
+            )
+    else:
+        res = await race_check(check_single, frames)
+        ok = bool(res)
+        if res:
+            extra_vars["$checked_result"] = await repack_save(
+                frames,
+                iter((res.extra.vis(),)),
+            )
+    return CheckResult(ok, extra_vars)
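When `nailong_checked_result_all` is on, every frame is checked (bounded by a semaphore) so the repacked GIF can carry boxes on each frame; otherwise the old racing behaviour is kept and only the winning frame is rendered. The `with_semaphore` helper from cookit bounds the `gather`; for reference, the equivalent plain-asyncio pattern looks roughly like this:

    import asyncio

    async def gather_bounded(coros, limit: int):
        sem = asyncio.Semaphore(limit)

        async def run(c):
            async with sem:
                return await c

        return await asyncio.gather(*(run(c) for c in coros))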