From 5ed451e47da1c5eb14807b1b4739591b6180dd6f Mon Sep 17 00:00:00 2001
From: yumoqing
Date: Thu, 14 Aug 2025 21:49:05 +0800
Subject: [PATCH] bugfix

---
 findperson/utils_clip.py | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/findperson/utils_clip.py b/findperson/utils_clip.py
index 49278c9..cae06cf 100644
--- a/findperson/utils_clip.py
+++ b/findperson/utils_clip.py
@@ -4,24 +4,24 @@ from PIL import Image
 import torch
 
 class CLIPEmbedder:
-    def __init__(self):
+	def __init__(self):
 		# model_id="laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
 		self.config = getConfig()
 		model_path = self.config.clip_model_path
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.model = CLIPModel.from_pretrained(model_path).to(self.device)
-        self.processor = CLIPProcessor.from_pretrained(model_path)
+		self.device = "cuda" if torch.cuda.is_available() else "cpu"
+		self.model = CLIPModel.from_pretrained(model_path).to(self.device)
+		self.processor = CLIPProcessor.from_pretrained(model_path)
 
-    def embed_image(self, image_path):
-        image = Image.open(image_path).convert("RGB")
-        inputs = self.processor(images=image, return_tensors="pt").to(self.device)
-        with torch.no_grad():
-            embedding = self.model.get_image_features(**inputs)
-        return embedding[0].cpu().numpy()
+	def embed_image(self, image_path):
+		image = Image.open(image_path).convert("RGB")
+		inputs = self.processor(images=image, return_tensors="pt").to(self.device)
+		with torch.no_grad():
+			embedding = self.model.get_image_features(**inputs)
+		return embedding[0].cpu().numpy()
 
-    def embed_text(self, text):
-        inputs = self.processor(text=text, return_tensors="pt").to(self.device)
-        with torch.no_grad():
-            embedding = self.model.get_text_features(**inputs)
-        return embedding[0].cpu().numpy()
+	def embed_text(self, text):
+		inputs = self.processor(text=text, return_tensors="pt").to(self.device)
+		with torch.no_grad():
+			embedding = self.model.get_text_features(**inputs)
+		return embedding[0].cpu().numpy()
 
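
For reviewers, a minimal usage sketch of the class this patch touches. It assumes
`getConfig()` resolves `clip_model_path` to a local CLIP checkpoint, as in the hunk
above; the image path, the query text, and the cosine-similarity helper below are
illustrative only and are not part of findperson:

    import numpy as np
    from findperson.utils_clip import CLIPEmbedder

    embedder = CLIPEmbedder()

    # Hypothetical inputs, for illustration only.
    img_vec = embedder.embed_image("photos/person_001.jpg")
    txt_vec = embedder.embed_text("a person wearing a red jacket")

    # Cosine similarity between the two CLIP embeddings
    # (illustrative helper, not a findperson API).
    score = float(np.dot(img_vec, txt_vec) /
                  (np.linalg.norm(img_vec) * np.linalg.norm(txt_vec)))
    print(f"image-text similarity: {score:.4f}")

Both embed_image and embed_text return 1-D numpy arrays, so a plain dot product
plus norm division is enough to rank image-text matches.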