Instructions to use OpenGVLab/VideoChat-TPO with libraries, inference providers, notebooks, and local apps. Follow the sections below to get started.
- Libraries
  - Transformers
    How to use OpenGVLab/VideoChat-TPO with Transformers:
      # Load model directly
      from transformers import AutoModel

      model = AutoModel.from_pretrained("OpenGVLab/VideoChat-TPO", trust_remote_code=True, dtype="auto")
- Notebooks
  - Google Colab
  - Kaggle
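A slightly fuller loading sketch follows; it is only a sketch. The `AutoModel.from_pretrained` call repeats the snippet above, and the device placement and `eval()` calls are plain PyTorch. The actual video pre-processing and inference entry points come from the model's custom remote code and are not reproduced here.

      # Load model directly (hedged sketch: loading only)
      import torch
      from transformers import AutoModel

      model = AutoModel.from_pretrained(
          "OpenGVLab/VideoChat-TPO",
          trust_remote_code=True,  # the architecture is defined in the repo's remote code
          dtype="auto",
      )
      model = model.to("cuda" if torch.cuda.is_available() else "cpu").eval()

The remainder of this page lists the vision encoder (ViT) implementation from the model's code.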
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from functools import partial
from timm.models.layers import drop_path, to_2tuple, trunc_normal_

logger = logging.getLogger(__name__)


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic',
        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
        **kwargs
    }
class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout=0):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.dropout = dropout
        if dropout:
            self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
            if self.dropout and i < self.num_layers:
                x = self.dropout(x)
        return x
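# Hypothetical usage sketch for MLP (not part of the original file): a 3-layer
# head mapping 768-d tokens to 4 box coordinates, the way this kind of FFN head
# is typically used in DETR-style models. Names and sizes are illustrative only.
def _example_mlp_head():
    head = MLP(input_dim=768, hidden_dim=256, output_dim=4, num_layers=3, dropout=0.1)
    tokens = torch.randn(2, 10, 768)   # (batch, queries, dim)
    boxes = head(tokens)               # -> shape (2, 10, 4)
    return boxes.shape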
class PostProcess(nn.Module):
    """This module converts the model's output into the format expected by the coco api"""

    def forward(self, out_sted, frames_id):
        """Perform the computation for inference evaluation"""
        # import pdb; pdb.set_trace()
        b, t, _ = out_sted.shape
        device = out_sted.device
        temp_prob_map = torch.zeros(b, t, t).to(device)
        inf = -1e32
        for i_b in range(len(frames_id)):
            duration = len(frames_id[0])
            sted_prob = (torch.ones(t, t) * inf).tril(0).to(device)
            sted_prob[duration:, :] = inf
            sted_prob[:, duration:] = inf
            temp_prob_map[i_b, :, :] = sted_prob

        temp_prob_map += F.log_softmax(out_sted[:, :, 0], dim=1).unsqueeze(2) + \
            F.log_softmax(out_sted[:, :, 1], dim=1).unsqueeze(1)

        pred_steds = []
        for i_b in range(b):
            prob_map = temp_prob_map[i_b]  # [T * T]
            frame_id_seq = frames_id[i_b]
            prob_seq = prob_map.flatten(0)
            max_tstamp = prob_seq.max(dim=0)[1].item()
            start_idx = max_tstamp // t
            end_idx = max_tstamp % t
            pred_sted = [frame_id_seq[start_idx], frame_id_seq[end_idx] + 1]
            pred_steds.append(pred_sted)
        return pred_steds
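# Hypothetical usage sketch for PostProcess (illustrative only). out_sted holds
# per-frame start/end logits of shape (batch, T, 2); frames_id maps positions to
# the original frame indices. The module sums start log-probs (rows) and end
# log-probs (columns) into a (T, T) map, masks entries with start >= end, and
# returns [start_frame, end_frame + 1] for each sample.
def _example_postprocess():
    post = PostProcess()
    out_sted = torch.randn(1, 8, 2)    # one clip, 8 sampled frames
    frames_id = [list(range(8))]       # frame indices for each sample
    return post(out_sted, frames_id)   # e.g. [[2, 6]]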
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
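# Note on DropPath (stochastic depth): in training mode, timm's drop_path zeroes
# the residual-branch output for a random subset of samples with probability
# drop_prob and rescales the survivors by 1 / (1 - drop_prob), so the expectation
# is unchanged; in eval mode it is the identity. A minimal sketch (illustrative only):
def _example_drop_path():
    dp = DropPath(drop_prob=0.2)
    x = torch.ones(4, 3, 8)
    dp.train()
    y_train = dp(x)   # some samples zeroed, the rest scaled by 1 / 0.8
    dp.eval()
    y_eval = dp(x)    # identical to x
    return y_train.shape, torch.equal(y_eval, x)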
class Attention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
        # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
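# Hypothetical shape check for Attention (illustrative only): input and output
# are both (batch, tokens, dim). qkv_bias=True enables the separate q/v biases
# with a fixed zero bias for k, as implemented above.
def _example_attention():
    attn = Attention(dim=768, num_heads=12, qkv_bias=True)
    x = torch.randn(2, 196, 768)   # e.g. 14x14 patch tokens of one frame
    return attn(x).shape           # -> torch.Size([2, 196, 768])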
class Block(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if init_values > 0:
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x):
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.tubelet_size = int(tubelet_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv3d(
            in_channels=in_chans, out_channels=embed_dim,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1])
        )
        logger.info(f'Num of patches: {num_patches}')

    def forward(self, x, **kwargs):
        B, C, T, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
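# Worked example for PatchEmbed (illustrative only): with img_size=224,
# patch_size=16, num_frames=8 and tubelet_size=1, there are (224 / 16)^2 = 196
# patches per frame and 196 * 8 = 1568 tokens per clip, so the Conv3d projection
# turns (B, 3, 8, 224, 224) into (B, 1568, embed_dim).
def _example_patch_embed():
    pe = PatchEmbed(img_size=224, patch_size=16, embed_dim=768, num_frames=8, tubelet_size=1)
    video = torch.randn(1, 3, 8, 224, 224)   # (B, C, T, H, W)
    return pe(video).shape                   # -> torch.Size([1, 1568, 768])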
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid, ckpt_num_frame=-1, cur_frame=12):
    ''' Sinusoid position encoding table '''
    # TODO: make it with torch instead of numpy
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    if ckpt_num_frame != -1 and ckpt_num_frame != cur_frame:
        logger.info(f"Interpolate position embedding")
        logger.info(f"Testing frame: {cur_frame}")
        logger.info(f"Checkpoint frame: {ckpt_num_frame}")

        T = ckpt_num_frame  # checkpoint frame
        new_T = cur_frame  # testing frame
        n_position = n_position // new_T * T  # generate checkpoint position embedding
        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        sinusoid_table = torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)
        # interpolate
        P = int((n_position // T) ** 0.5)
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, 1).reshape(-1, C, T)  # BHW, C, T
        sinusoid_table = torch.nn.functional.interpolate(sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3)  # B, T, H, W, C
        sinusoid_table = sinusoid_table.flatten(1, 3)
        return sinusoid_table
    else:
        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        return torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)
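# The fixed table above follows the standard Transformer encoding:
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_hid))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_hid))
# Hypothetical shape check (illustrative only): a 1568-token table for an
# 8-frame, 14x14-patch clip with a 768-d embedding.
def _example_sinusoid_table():
    table = get_sinusoid_encoding_table(n_position=1568, d_hid=768)
    return table.shape   # -> torch.Size([1, 1568, 768])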
def get_sinusoid_encoding_table2(n_position=784, d_hid=1024, cur_frame=8, ckpt_num_frame=4, pre_n_position=784):
    ''' Sinusoid position encoding table '''
    # TODO: make it with torch instead of numpy
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    # generate checkpoint position embedding
    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(pre_n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    sinusoid_table = torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)

    print(f"n_position: {n_position}")
    print(f"pre_n_position: {pre_n_position}")

    if n_position != pre_n_position:
        T = ckpt_num_frame  # checkpoint frame
        P = 14  # checkpoint size
        C = d_hid
        new_P = int((n_position // cur_frame) ** 0.5)  # testing size
        print(f'Pretraining uses 14x14, but current version is {new_P}x{new_P}')
        print(f'Interpolate the position embedding')
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.reshape(-1, P, P, C).permute(0, 3, 1, 2)
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table, size=(new_P, new_P), mode='bicubic', align_corners=False)
        # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 1).reshape(-1, T, new_P, new_P, C)
        sinusoid_table = sinusoid_table.flatten(1, 3)  # B, THW, C

    if cur_frame != ckpt_num_frame:
        print(f'Pretraining uses 4 frames, but current frame is {cur_frame}')
        print(f'Interpolate the position embedding')
        T = ckpt_num_frame  # checkpoint frame
        new_T = cur_frame  # testing frame
        # interpolate
        P = int((n_position // cur_frame) ** 0.5)  # testing size
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, 1).reshape(-1, C, T)  # BHW, C, T
        sinusoid_table = torch.nn.functional.interpolate(sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3)  # B, T, H, W, C
        sinusoid_table = sinusoid_table.flatten(1, 3)  # B, THW, C

    return sinusoid_table
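# Worked example for get_sinusoid_encoding_table2 (illustrative only): for a
# 448x448 input with 16x16 patches and 8 frames, n_position = 28 * 28 * 8 = 6272,
# so new_P = sqrt(6272 // 8) = 28. The pretraining 14x14 grid is first
# interpolated spatially (bicubic) to 28x28, then temporally (linear) from the
# checkpoint's 4 frames to 8.
def _example_sinusoid_table2():
    table = get_sinusoid_encoding_table2(
        n_position=28 * 28 * 8, d_hid=768, cur_frame=8, ckpt_num_frame=4, pre_n_position=14 * 14 * 4)
    return table.shape   # -> torch.Size([1, 6272, 768])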
class PretrainVisionTransformerEncoder(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, num_frames=8, tubelet_size=1,
                 use_learnable_pos_emb=False,
                 use_checkpoint=False, checkpoint_num=0,
                 ckpt_num_frame=-1, with_ln=True, return_index=-1
                 ):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            num_frames=num_frames, tubelet_size=tubelet_size
        )
        num_patches = self.patch_embed.num_patches
        self.depth = depth + return_index + 1
        self.use_checkpoint = use_checkpoint
        self.checkpoint_num = checkpoint_num
        logger.info(f"Use checkpoint: {use_checkpoint}")
        logger.info(f"Checkpoint number: {checkpoint_num}")
        logger.info(f"Real running depth: {self.depth}")

        # TODO: Add the cls token
        if use_learnable_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
            self.img_pos_embed = nn.Parameter(torch.zeros(1, num_patches//(num_frames//tubelet_size) + 1, embed_dim))
        else:
            # sine-cosine positional embeddings
            if img_size != 224:
                self.pos_embed = get_sinusoid_encoding_table2(num_patches, embed_dim, ckpt_num_frame=ckpt_num_frame, cur_frame=num_frames//tubelet_size)
                self.img_pos_embed = get_sinusoid_encoding_table2(num_patches//(num_frames//tubelet_size), embed_dim, cur_frame=1, ckpt_num_frame=1, pre_n_position=14*14)
            else:
                self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim, ckpt_num_frame=ckpt_num_frame, cur_frame=num_frames//tubelet_size)
                self.img_pos_embed = get_sinusoid_encoding_table(num_patches//(num_frames//tubelet_size), embed_dim)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values)
            for i in range(self.depth)])

        if with_ln:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = nn.Identity()

        if use_learnable_pos_emb:
            trunc_normal_(self.pos_embed, std=.02)

    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def forward_features(self, x, use_image=False):
        x = self.patch_embed(x)

        if use_image:
            x = x + self.img_pos_embed.type_as(x).to(x.device).clone().detach()
        else:
            x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()

        B, _, C = x.shape
        x_vis = x

        for idx, blk in enumerate(self.blocks):
            if self.use_checkpoint and idx < self.checkpoint_num:
                x_vis = checkpoint.checkpoint(blk, x_vis)
            else:
                x_vis = blk(x_vis)

        # apply the final LayerNorm only when with_ln is set
        x_vis = self.norm(x_vis)
        return x_vis

    def forward(self, x, use_image=False):
        x_vis = self.forward_features(x, use_image)
        return x_vis
class PretrainVisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 encoder_in_chans=3,
                 encoder_embed_dim=768,
                 encoder_depth=12,
                 encoder_num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=partial(nn.LayerNorm, eps=1e-6),
                 init_values=0.,
                 use_learnable_pos_emb=False,
                 num_frames=8,
                 tubelet_size=1,
                 use_checkpoint=False,
                 checkpoint_num=0,
                 ckpt_num_frame=4,  # the pretrained model uses 4 frames
                 return_index=-1,
                 with_ln=False
                 ):
        super().__init__()

        self.encoder = PretrainVisionTransformerEncoder(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=encoder_in_chans,
            embed_dim=encoder_embed_dim,
            depth=encoder_depth,
            num_heads=encoder_num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            norm_layer=norm_layer,
            init_values=init_values,
            num_frames=num_frames,
            tubelet_size=tubelet_size,
            use_learnable_pos_emb=use_learnable_pos_emb,
            use_checkpoint=use_checkpoint,
            checkpoint_num=checkpoint_num,
            ckpt_num_frame=ckpt_num_frame,
            with_ln=with_ln,
            return_index=return_index
        )
        logger.info(f'With LN: {with_ln}')
        logger.info(f'Total {encoder_depth} layer')
        logger.info(f'Return {encoder_depth+return_index+1}-th layer')

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'clip_pos_embed'}

    def forward(self, x, use_image=False):
        T = x.shape[2]
        x_vis = self.encoder(x, use_image)  # [B, N_vis, C_e]
        B, TL, C = x_vis.shape
        x_vis = x_vis.view(B, T, TL // T, C)

        return x_vis
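# Hypothetical end-to-end shape check for the encoder wrapper (illustrative
# only): an 8-frame 224x224 clip with the default ViT-Base settings yields one
# 196-token, 768-d feature map per frame.
def _example_encoder_forward():
    vit = PretrainVisionTransformer(num_frames=8, tubelet_size=1, ckpt_num_frame=-1)
    video = torch.randn(1, 3, 8, 224, 224)   # (B, C, T, H, W)
    with torch.no_grad():
        feats = vit(video)
    return feats.shape   # -> torch.Size([1, 8, 196, 768])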
def build_vit(config):
    model = PretrainVisionTransformer(
        img_size=config.vision_encoder.img_size,
        patch_size=config.vision_encoder.patch_size,
        encoder_embed_dim=config.vision_encoder.encoder_embed_dim,
        encoder_depth=config.vision_encoder.encoder_depth,
        encoder_num_heads=config.vision_encoder.encoder_num_heads,
        drop_path_rate=config.vision_encoder.drop_path_rate,
        num_frames=config.vision_encoder.num_frames,
        tubelet_size=config.vision_encoder.tubelet_size,
        use_checkpoint=config.vision_encoder.use_checkpoint,
        checkpoint_num=config.vision_encoder.checkpoint_num,
        return_index=config.vision_encoder.get('return_index', -1),
        with_ln=config.vision_encoder.get('with_ln', False),
    )
    model.default_cfg = _cfg()
    if config.vision_encoder.pretrained:
        logger.info(f"Loading pretrained weights from {config.vision_encoder.pretrained}")
        state_dict = torch.load(config.vision_encoder.pretrained, map_location='cpu')
        model.load_state_dict(state_dict, strict=False)
    else:
        logger.info("No pretrained weights!!!")
    return model
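# Hypothetical config sketch for build_vit (illustrative only). The project
# drives this from its own config system; here a tiny attribute-dict stands in
# for it, with `pretrained` left empty so no checkpoint is loaded. The field
# names mirror the attribute accesses in build_vit above.
class _AttrDict(dict):
    __getattr__ = dict.__getitem__


def _example_build_vit():
    config = _AttrDict(vision_encoder=_AttrDict(
        img_size=224, patch_size=16,
        encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12,
        drop_path_rate=0.1, num_frames=8, tubelet_size=1,
        use_checkpoint=False, checkpoint_num=0,
        return_index=-1, with_ln=False,
        pretrained='',
    ))
    return build_vit(config)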