mirror of https://github.com/Refound-445/nonebot-plugin-nailongremove.git
synced 2025-11-04 21:22:43 +08:00

This commit is contained in:
parent d98f6bd411
commit e7b9afd4d7
.idea/.gitignore (generated, vendored, Normal file, +8 lines)
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/inspectionProfiles/profiles_settings.xml (generated, Normal file, +6 lines)
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/misc.xml (generated, Normal file, +4 lines)
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12" project-jdk-type="Python SDK" />
+</project>
.idea/modules.xml (generated, Normal file, +8 lines)
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/nonebot-plugin-nailongremove.iml" filepath="$PROJECT_DIR$/.idea/nonebot-plugin-nailongremove.iml" />
+    </modules>
+  </component>
+</project>
.idea/nonebot-plugin-nailongremove.iml (generated, Normal file, +8 lines)
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
.idea/vcs.xml (generated, Normal file, +6 lines)
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
@@ -38,6 +38,7 @@ else:
    file_path = os.path.join(str(config.nailong_model_dir), FILENAME)
    model_info = api.model_info(REPO_ID)


    def get_file_last_modified_time(file_path):
        try:
            timestamp = os.path.getmtime(file_path)
@@ -49,6 +50,7 @@ else:
        except FileNotFoundError:
            return None


    local_time = get_file_last_modified_time(file_path)
    if local_time is None or model_info.last_modified >= local_time:
        hf_hub_download(
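For readers skimming the diff: the two hunks above implement a simple freshness check. The local model file is re-downloaded from the Hugging Face Hub whenever the repo's last_modified timestamp is newer than the local file's mtime, or when the file is missing. A minimal standalone sketch of that pattern, with placeholder repo id, filename, and directory rather than the plugin's actual configuration:

import os
from datetime import datetime, timezone
from huggingface_hub import HfApi, hf_hub_download

REPO_ID = "some-user/some-model"   # placeholder, not the plugin's real repo id
FILENAME = "model.onnx"            # placeholder file name
MODEL_DIR = "./models"             # placeholder download directory


def local_mtime(path):
    """Return the file's mtime as an aware datetime, or None if the file is missing."""
    try:
        return datetime.fromtimestamp(os.path.getmtime(path), tz=timezone.utc)
    except FileNotFoundError:
        return None


remote = HfApi().model_info(REPO_ID)                  # ModelInfo; .last_modified is a datetime
local = local_mtime(os.path.join(MODEL_DIR, FILENAME))
if local is None or remote.last_modified >= local:
    # stale or absent locally: fetch the current file into MODEL_DIR
    hf_hub_download(repo_id=REPO_ID, filename=FILENAME, local_dir=MODEL_DIR)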
@@ -76,7 +78,7 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
    input_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    if not os.path.exists(
-        os.path.join(str(config.nailong_model_dir), "online_temp"),
+        os.path.join(str(config.nailong_model_dir), "online_temp"),
    ):
        os.makedirs(os.path.join(str(config.nailong_model_dir), "online_temp"))
    image_path = os.path.join(
@@ -100,8 +102,8 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:
        )
        os.remove(image_path)
        if (
-            "检测到的目标数量: " in result_info
-            and int(result_info.split("检测到的目标数量: ")[1].split("\n")[0]) < 1
+            "检测到的目标数量: " in result_info
+            and int(result_info.split("检测到的目标数量: ")[1].split("\n")[0]) < 1
        ):
            return CheckSingleResult(ok=False, label=None, extra=frame)
        if isinstance(result_image, str):
@@ -142,9 +144,9 @@ def _check_single(frame: np.ndarray, is_gif: bool = False) -> CheckSingleResult:

    if pad_w > 0 or pad_h > 0:
        result_img = result_img[
-            pad_h // 2 : pad_h // 2 + original_size[1],
-            pad_w // 2 : pad_w // 2 + original_size[0],
-        ]
+            pad_h // 2: pad_h // 2 + original_size[1],
+            pad_w // 2: pad_w // 2 + original_size[0],
+        ]
    return CheckSingleResult(ok=True, label="nailong", extra=result_img)
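The slice being restyled in the hunk above is the padding cleanup after detection: the frame was padded to a fixed input size, so the symmetric borders are cropped back off to restore the original resolution. A small illustrative sketch (shapes and the helper name are hypothetical, not the plugin's code):

import numpy as np

def strip_padding(padded, original_size, pad_w, pad_h):
    """Remove symmetric padding; original_size is (width, height)."""
    return padded[
        pad_h // 2: pad_h // 2 + original_size[1],
        pad_w // 2: pad_w // 2 + original_size[0],
    ]

padded = np.zeros((1280, 1280, 3), dtype=np.uint8)                 # 1280x720 frame padded to a square
restored = strip_padding(padded, (1280, 720), pad_w=0, pad_h=560)
assert restored.shape[:2] == (720, 1280)                           # original height x width recovered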
@@ -83,8 +83,8 @@ class FrameInfo:

@run_sync
def _check_single(
-    frame: np.ndarray,
-    is_gif: bool = False,
+    frame: np.ndarray,
+    is_gif: bool = False,
) -> CheckSingleResult[Optional[Detections]]:
    if is_gif:
        res = similarity_process(frame)
@@ -127,8 +127,8 @@ def _check_single(


async def check_single(
-    frame: np.ndarray,
-    is_gif: bool = False,
+    frame: np.ndarray,
+    is_gif: bool = False,
) -> CheckSingleResult[FrameInfo]:
    if is_gif:
        res = await _check_single(frame, True)
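Both wrappers above rely on the @run_sync decorator, NoneBot's small helper that wraps a blocking function so it can be awaited while the work runs in a thread pool. A rough generic equivalent using only the standard library (decorator name and typing are mine, not the plugin's):

import asyncio
import functools
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")


def to_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Wrap a blocking function so callers can await it; the call runs in the default executor."""
    @functools.wraps(func)
    async def wrapper(*args, **kwargs) -> T:
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
    return wrapper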
@@ -35,6 +35,8 @@ if config.nailong_similarity_on:
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5], std=[0.5])  # Assuming grayscale or single-channel
        ])


    class MyModel(
        nn.Module,
        PyTorchModelHubMixin,
@@ -46,36 +48,41 @@ if config.nailong_similarity_on:

        def forward(self, x):
            return self.resnet(x)
-    features_model = MyModel.from_pretrained("refoundd/NailongFeatures",).to(device)
-    index_path=config.nailong_model_dir / 'records.index'
-    json_path=config.nailong_model_dir / 'records.json'


+    features_model = MyModel.from_pretrained("refoundd/NailongFeatures", ).to(device)
+    index_path = config.nailong_model_dir / 'records.index'
+    json_path = config.nailong_model_dir / 'records.json'
    if os.path.exists(index_path):
-        index=faiss.read_index(str(index_path))
+        index = faiss.read_index(str(index_path))
    else:
-        index=faiss.IndexFlatL2(512)
+        index = faiss.IndexFlatL2(512)
    if os.path.exists(json_path):
        with open(json_path, 'r') as f:
-            index_cls=json.load(f)
+            index_cls = json.load(f)
    else:
-        index_cls= {}
+        index_cls = {}
    if torch.cuda.is_available():
        try:
            res = faiss.StandardGpuResources()  # create GPU resources
            index = faiss.index_cpu_to_gpu(res, 0, index)  # move the CPU index onto the GPU
        except Exception as e:
            logger.warning("load faiss-gpu failed. Please check your GPU device and install faiss-gpu first.")


    def hook(model, input, output):
-        embeddings=input[0]
+        embeddings = input[0]
        vector = embeddings.detach().cpu().numpy().astype(np.float32)
        faiss.normalize_L2(vector)
        global index
        d, i = index.search(vector, 1)
-        return 1-d[0][0],i[0][0],vector
+        return 1 - d[0][0], i[0][0], vector


    features_model.resnet.fc.register_forward_hook(hook)
    features_model.eval()



@dataclass
class CheckSingleResult(Generic[T]):
    ok: bool
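The block above registers a forward hook on resnet.fc, so every forward pass of features_model also performs a nearest-neighbour lookup: the feature vector entering the fully connected layer is L2-normalized and searched against a faiss IndexFlatL2, and the hit position is mapped to a label through the dictionary loaded from records.json. A stripped-down sketch of just the faiss side (the vectors and labels below are made up for illustration):

import numpy as np
import faiss

dim = 512
index = faiss.IndexFlatL2(dim)                 # exact L2 index, same type as in the hunk above
index_cls = {}                                 # maps str(position in index) -> label

# pretend this came out of the feature extractor
vec = np.random.rand(1, dim).astype(np.float32)
faiss.normalize_L2(vec)                        # in-place L2 normalization
index.add(vec)
index_cls[str(index.ntotal - 1)] = "nailong"   # label the vector we just stored

# query: IndexFlatL2 returns squared L2 distances; 0.0 means an exact duplicate
d, i = index.search(vec.copy(), 1)
print(1 - d[0][0], index_cls[str(i[0][0])])    # the similarity score used by the hook, and its label

# persistence mirrors the diff: faiss.write_index(index, "records.index"); json.dump(index_cls, ...)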
@@ -105,9 +112,9 @@ FrameChecker: TypeAlias = Callable[


async def race_check(
-    checker: FrameChecker[T],
-    frames: FrameSource,
-    concurrency: int = config.nailong_concurrency,
+    checker: FrameChecker[T],
+    frames: FrameSource,
+    concurrency: int = config.nailong_concurrency,
) -> Optional[CheckSingleResult[T]]:
    iterator = iter(frames)
    if config.nailong_similarity_on:
@@ -175,40 +182,40 @@ async def race_check(
    return None


-def similarity_process(image1: np.ndarray, dsize=(224,224),similarity_threshold=1) -> Optional[CheckSingleResult]:
+def similarity_process(image1: np.ndarray, dsize=(224, 224), similarity_threshold=1) -> Optional[CheckSingleResult]:
    # image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    image1 = cv2.resize(image1, dsize, interpolation=cv2.INTER_LINEAR)
    image1_tensor = transform(image1).unsqueeze(0).to(device)
    # image1_tensor = (
    #     torch.tensor(image1, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
    # ).to(device)
-    distance,indice,_=features_model(image1_tensor)
+    distance, indice, _ = features_model(image1_tensor)
    if distance >= similarity_threshold:
-        label =index_cls[str(indice)]
+        label = index_cls[str(indice)]
        return CheckSingleResult(ok=True, label=label, extra=None)
    return None


-def process_gif_and_save_jpgs(frames, label, dsize=(224,224), similarity_threshold=1):
+def process_gif_and_save_jpgs(frames, label, dsize=(224, 224), similarity_threshold=1):
    if (
-        len(
-            list(
-                glob.glob(
-                    str(config.nailong_model_dir / "records/*/*.jpg")
+        len(
+            list(
+                glob.glob(
+                    str(config.nailong_model_dir / "records/*/*.jpg")
                ),
            ),
        ),
-        )
-        >= config.nailong_similarity_max_storage and config.nailong_hf_token is not None
+        )
+        >= config.nailong_similarity_max_storage and config.nailong_hf_token is not None
    ):
        zip_filename = shutil.make_archive(
-            config.nailong_model_dir / "{}_records".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")),
+            config.nailong_model_dir / "{}_records".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")),
            "zip",
            config.nailong_model_dir / "records"
        )
        shutil.rmtree(config.nailong_model_dir / "records")
        from huggingface_hub import HfApi
        api = HfApi()
-        commitInfo=api.upload_file(
+        commitInfo = api.upload_file(
            path_or_fileobj=zip_filename,
            path_in_repo="new_dataset.zip",
            repo_id="refoundd/NailongClassification",
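The tail of that hunk archives the accumulated records folder and uploads it to the Hub once it grows past nailong_similarity_max_storage. A reduced sketch of the archive-and-upload step; the paths, repo type, and token handling here are assumptions for illustration, the real call site is in the hunk above:

import shutil
from huggingface_hub import HfApi

records_dir = "./records"                                             # hypothetical local folder of saved frames
zip_path = shutil.make_archive("records_backup", "zip", records_dir)  # produces records_backup.zip

api = HfApi(token="hf_xxx")                    # placeholder token; the plugin reads config.nailong_hf_token
commit = api.upload_file(
    path_or_fileobj=zip_path,
    path_in_repo="new_dataset.zip",
    repo_id="refoundd/NailongClassification",  # repo named in the diff
    repo_type="dataset",                       # assumption: the target is a dataset repo
)
print(commit.commit_url)                       # upload_file returns a CommitInfo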
@@ -219,10 +226,10 @@ def process_gif_and_save_jpgs(frames, label, dsize=(224,224), similarity_threshold=1):
        # os.remove(zip_filename)
    else:
        commitInfo = None
-    output_dir = config.nailong_model_dir / "records"/ label
+    output_dir = config.nailong_model_dir / "records" / label
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
-    count=0
+    count = 0
    for frame in frames:
        frame_filename = os.path.join(
            output_dir,
@@ -235,19 +242,19 @@ def process_gif_and_save_jpgs(frames, label, dsize=(224,224), similarity_threshold=1):
            frame_filename = "exist-" + frame_filename
        cv2.imwrite(frame_filename, frame)
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        frame = cv2.resize(frame, dsize,interpolation=cv2.INTER_LINEAR)
+        frame = cv2.resize(frame, dsize, interpolation=cv2.INTER_LINEAR)
        image1_tensor = transform(frame).unsqueeze(0).to(device)
        # image1_tensor = (
        #     torch.tensor(frame, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
        # ).to(device)
-        d,i,features=features_model(image1_tensor)
+        d, i, features = features_model(image1_tensor)
        if d >= similarity_threshold:
-            index_cls[str(i)]=label
+            index_cls[str(i)] = label
        else:
            index.add(features)
-            index_cls[str(index.ntotal-1)]=label
-            count+=1
+            index_cls[str(index.ntotal - 1)] = label
+            count += 1
    faiss.write_index(index, str(index_path))
-    with open(json_path,'w') as f:
-        json.dump(index_cls,f)
+    with open(json_path, 'w') as f:
+        json.dump(index_cls, f)
    return commitInfo
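One detail worth spelling out from the final hunk: a frame only counts as already known when its similarity score (1 - d, where d is the squared L2 distance between normalized vectors returned by IndexFlatL2) reaches similarity_threshold, and at the default threshold of 1 that effectively requires an exact duplicate; anything else is appended to the index and labeled by its position. A hedged sketch of that per-frame decision (the helper name and arguments are mine, not the plugin's):

import numpy as np
import faiss


def register_vector(index, index_cls, vector, label, similarity, nearest, threshold=1):
    """Relabel the nearest stored vector on a hit, otherwise append a new one and label its position."""
    if similarity >= threshold:
        index_cls[str(nearest)] = label        # frame already represented: just (re)label the neighbour
        return False                           # nothing new stored
    index.add(vector)                          # new content: append and label it by its index position
    index_cls[str(index.ntotal - 1)] = label
    return True

# toy usage with a fresh index
index = faiss.IndexFlatL2(512)
index_cls = {}
vec = np.random.rand(1, 512).astype(np.float32)
faiss.normalize_L2(vec)
register_vector(index, index_cls, vec, "nailong", similarity=0.0, nearest=-1)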