# Based on https://github.com/christophschuhmann/improved-aesthetic-predictor/blob/fe88a163f4661b4ddabba0751ff645e2e620746e/simple_inference.py
from importlib_resources import files

import torch
import torch.nn as nn
from transformers import CLIPModel, CLIPProcessor
from PIL import Image
ASSETS_PATH = files("assets")

class MLPDiff(nn.Module):
    """MLP head mapping a 768-dim CLIP image embedding to a scalar aesthetic score."""

    def __init__(self):
        super().__init__()
        # Same layer sizes as the referenced predictor: dropout only, no activations,
        # so the head stays compatible with the pretrained checkpoint below.
        self.layers = nn.Sequential(
            nn.Linear(768, 1024),
            nn.Dropout(0.2),
            nn.Linear(1024, 128),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            nn.Dropout(0.1),
            nn.Linear(64, 16),
            nn.Linear(16, 1),
        )

    def forward(self, embed):
        return self.layers(embed)

class AestheticScorerDiff(torch.nn.Module):
    """Differentiable aesthetic scorer: CLIP ViT-L/14 image encoder plus the
    pretrained linear-MSE MLP head from the repository referenced above."""

    def __init__(self, dtype):
        super().__init__()
        self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
        self.mlp = MLPDiff()
        # Load the pretrained aesthetic-head weights shipped in the assets package.
        state_dict = torch.load(ASSETS_PATH.joinpath("sac+logos+ava1-l14-linearMSE.pth"))
        self.mlp.load_state_dict(state_dict)
        self.dtype = dtype
        self.eval()  # inference mode: disables the dropout layers
    def __call__(self, images):
        # Encode the pixel-value tensors with CLIP, then L2-normalize the
        # embeddings before scoring, as in the original predictor.
        embed = self.clip.get_image_features(pixel_values=images)
        embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True)
        return self.mlp(embed).squeeze(1)
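
# Usage sketch (not part of the original file): scoring a single image.
# Assumes the "assets" package containing sac+logos+ava1-l14-linearMSE.pth is
# importable; "sample.jpg" is a placeholder path.
if __name__ == "__main__":
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    scorer = AestheticScorerDiff(dtype=torch.float32)

    image = Image.open("sample.jpg")
    # Resize/normalize to the (1, 3, 224, 224) pixel-value tensor CLIP expects.
    pixel_values = processor(images=image, return_tensors="pt")["pixel_values"]

    with torch.no_grad():
        score = scorer(pixel_values)
    print(f"aesthetic score: {score.item():.3f}")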