Multimodal Large Model BLIP: Code Walkthrough

Table of Contents

  • 1) Configuration file retrieval_coco.yaml
  • 2) train_retrieval.py code

1) Configuration file retrieval_coco.yaml

image_root: '/export/share/datasets/vision/coco/images/' # image root directory
ann_root: 'annotation' # annotation root directory
dataset: 'coco' # dataset name

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' # pre-trained model path

# size of vit model; base or large
vit: 'base' # vit model size, optional 'base' or 'large'
batch_size_train: 32 # training batch size
batch_size_test: 64 # test batch size
vit_grad_ckpt: True # Whether to use vit gradient checkpoint
vit_ckpt_layer: 4 # vit checkpoint layer
init_lr: 1e-5 # initial learning rate

image_size: 384 # image size
queue_size: 57600 # Queue size
alpha: 0.4 # Hyperparameters in the loss function
k_test: 256 # k value during test
negative_all_rank: True # Whether to use all negative samples for ranking

# optimizer
weight_decay: 0.05 # weight decay
min_lr: 0 # minimum learning rate
max_epoch: 6 # Maximum number of training rounds
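For reference, train_retrieval.py (walked through below) loads this file with PyYAML into an ordinary Python dict, so each key above is read as config['...']. A minimal standalone sketch of that step (hypothetical snippet, not part of the repository):

import yaml

# Load the retrieval config into a plain dict (the same yaml.load call train_retrieval.py uses)
config = yaml.load(open('./configs/retrieval_coco.yaml', 'r'), Loader=yaml.Loader)

print(config['dataset'])           # 'coco'
print(config['image_size'])        # 384
print(config['batch_size_train'])  # 32
print(config['k_test'])            # 256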

2) train_retrieval.py code

parser = argparse.ArgumentParser() # create an argument parser
    parser.add_argument('--config', default='./configs/retrieval_flickr.yaml') # config file path, default './configs/retrieval_flickr.yaml'
    parser.add_argument('--output_dir', default='output/Retrieval_flickr') # output directory, default 'output/Retrieval_flickr'
    parser.add_argument('--evaluate', action='store_true') # if present, run evaluation only (the flag is set to True)
    parser.add_argument('--device', default='cuda') # device to run on, default 'cuda'
    parser.add_argument('--seed', default=42, type=int) # random seed, integer, default 42
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') # number of distributed processes, default 1
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') # URL used to set up distributed training, default 'env://'
    parser.add_argument('--distributed', default=True, type=bool) # whether to use distributed training, default True
    args = parser.parse_args() # parse the command-line arguments

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader) # load configuration from file

    Path(args.output_dir).mkdir(parents=True, exist_ok=True) # create output directory
        
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w')) # save configuration to file
    
    main(args, config) # call the main function, pass in the parameters args and config
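Because --distributed defaults to True and --dist_url is 'env://', the script expects to be launched by a PyTorch distributed launcher that sets RANK, WORLD_SIZE and MASTER_ADDR in the environment. A typical invocation, following the pattern used in the BLIP repository (adapt the process count to your GPUs):

python -m torch.distributed.run --nproc_per_node=8 train_retrieval.py --config ./configs/retrieval_coco.yaml --output_dir output/retrieval_coco

Adding --evaluate skips training and only runs evaluation with the checkpoint given by 'pretrained' in the config.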


def main(args, config):
    # Initialize distributed mode
    utils.init_distributed_mode(args)

    # Get the device
    device = torch.device(args.device)

    # Set the random seed (offset by the process rank so each process differs)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    # Create the retrieval datasets
    print("Creating retrieval dataset")
    train_dataset, val_dataset, test_dataset = create_dataset('retrieval_%s'%config['dataset'], config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler([train_dataset], [True], num_tasks, global_rank) + [None, None]
    else:
        samplers = [None, None, None]

    # create data loader
    train_loader, val_loader, test_loader = create_loader([train_dataset, val_dataset, test_dataset],samplers,
                                                        batch_size=[config['batch_size_train']] + [config['batch_size_test']]*2,
                                                        num_workers=[4,4,4],
                                                        is_trains=[True, False, False],
                                                        collate_fns=[None,None,None])
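create_dataset, create_sampler and create_loader are BLIP's own data helpers (under data/ in the repository). Roughly, the train split gets a distributed sampler so each process sees a different shard of the data, while the val and test splits are read sequentially. The sketch below approximates the train-split behaviour with standard PyTorch classes, reusing the variables defined above; it is an approximation, not the repository's actual helper code:

from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

# Shard the training data across processes and shuffle within each shard
train_sampler = DistributedSampler(train_dataset, num_replicas=num_tasks,
                                   rank=global_rank, shuffle=True)
train_loader = DataLoader(train_dataset,
                          batch_size=config['batch_size_train'],
                          sampler=train_sampler,
                          num_workers=4,
                          pin_memory=True,
                          drop_last=True)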


    #### Model ####
    # create model
    print("create model")
    model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'],
                             vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'],
                             queue_size=config['queue_size'], negative_all_rank=config['negative_all_rank'])

    model = model.to(device)
    
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
    
    best = 0
    best_epoch = 0

    # start training
    print("Start training")
    start_time = time.time()

    for epoch in range(0, config['max_epoch']):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
                
            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])
            
            train_stats = train(model, train_loader, optimizer, epoch, device, config)
            
        score_val_i2t, score_val_t2i, = evaluation(model_without_ddp, val_loader, device, config)
        score_test_i2t, score_test_t2i = evaluation(model_without_ddp, test_loader, device, config)
    
        if utils.is_main_process():
      
            val_result = itm_eval(score_val_i2t, score_val_t2i, val_loader.dataset.txt2img, val_loader.dataset.img2txt)
            print(val_result)
                                
            if val_result['r_mean']>best:
                save_obj = {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'config': config,
                    'epoch': epoch,
                }
                torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                best = val_result['r_mean']
                best_epoch = epoch
                
                test_result = itm_eval(score_test_i2t, score_test_t2i, test_loader.dataset.txt2img, test_loader.dataset.img2txt)
                print(test_result)
            
            if args.evaluate:
                log_stats = {**{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                            }
                with open(os.path.join(args.output_dir, "evaluate.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
            else:
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                             'epoch': epoch,
                             'best_epoch': best_epoch,
                            }
                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
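Before each training epoch, main() calls utils.cosine_lr_schedule to decay the learning rate from init_lr (1e-5) to min_lr (0) over max_epoch epochs. The helper itself lives in BLIP's utils.py; the snippet below is a sketch of a cosine schedule of that shape and is an assumption about the helper's exact formula:

import math

def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    # Cosine decay: lr starts at init_lr at epoch 0 and reaches min_lr at epoch max_epoch
    lr = (init_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * epoch / max_epoch)) + min_lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr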



def train(model, data_loader, optimizer, epoch, device, config):
    # train
    model.train()
    
    # Initialize the metrics logger
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    # Loop through the dataset
    for i,(image, caption, idx) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # move the data to the device
        image = image.to(device,non_blocking=True)
        idx = idx.to(device,non_blocking=True)
       
        # Compute alpha: ramp it up linearly during the first epoch (warm-up), then keep it fixed
        if epoch > 0:
            alpha = config['alpha']
        else:
            alpha = config['alpha'] * min(1, i / len(data_loader))


        # Forward pass: the model returns the image-text contrastive (ITA) loss and the image-text matching (ITM) loss
        loss_ita, loss_itm = model(image, caption, alpha=alpha, idx=idx)
        loss = loss_ita + loss_itm
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(loss_ita=loss_ita.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}

# The train function above performs one epoch of training and records the metrics. Its arguments:
# model: the model
# data_loader: the training data loader
# optimizer: the optimizer
# epoch: the current training epoch
# device: the training device
# config: the configuration parameters
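One detail worth highlighting from train(): alpha, the loss-weighting hyperparameter from the config (used inside the model's ITA loss), is warmed up during the first epoch only, ramping linearly from 0 to config['alpha'] and staying constant afterwards. A tiny illustration of that schedule (hypothetical helper, not part of the repository):

def warmup_alpha(config, epoch, step, steps_per_epoch):
    # Same rule as in train(): linear ramp during epoch 0, constant from epoch 1 onwards
    if epoch > 0:
        return config['alpha']
    return config['alpha'] * min(1, step / steps_per_epoch)

# With alpha = 0.4 and 1000 steps per epoch:
#   epoch 0, step 0    -> 0.0
#   epoch 0, step 500  -> 0.2
#   epoch 1 and later  -> 0.4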
# The evaluation function is defined below.


@torch.no_grad()
def evaluation(model, data_loader, device, config):
    # test
    model.eval()
    
    # define metrics logger
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'
    
    # Compute text features
    print('Computing features for evaluation...')
    start_time = time.time()

    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i + text_bs)]
        text_input = model.tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device)
        text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text')
        text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:,0,:]))
        text_embeds.append(text_embed)
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)
    
    text_embeds = torch.cat(text_embeds,dim=0)
    text_ids = torch.cat(text_ids,dim=0)
    text_atts = torch.cat(text_atts,dim=0)
    text_ids[:,0] = model.tokenizer.enc_token_id # replace the first token with the special encoder token, switching the text encoder to image-grounded (ITM) mode
    
    # Compute image features
    image_feats = []
    image_embeds = []
    for image, img_id in data_loader:
        image = image.to(device)
        image_feat = model.visual_encoder(image)
        image_embed = model.vision_proj(image_feat[:,0,:])
        image_embed = F.normalize(image_embed,dim=-1)
        
        image_feats.append(image_feat.cpu())
        image_embeds.append(image_embed)
     
    image_feats = torch.cat(image_feats,dim=0)
    image_embeds = torch.cat(image_embeds,dim=0)
    
    # Calculate the similarity matrix
    sims_matrix = image_embeds @ text_embeds.t()
    score_matrix_i2t = torch.full((len(data_loader.dataset.image),len(texts)),-100.0).to(device)
    
    num_tasks = utils.get_world_size()
    rank = utils.get_rank()
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0), start + step)

    # Compute the image-to-text score matrix
    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)

        encoder_output = image_feats[start + i].repeat(config['k_test'],1,1).to(device)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device)
        output = model.text_encoder(text_ids[topk_idx],
                                    attention_mask = text_atts[topk_idx],
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_i2t[start + i,topk_idx] = score + topk_sim
        
    sims_matrix = sims_matrix.t()
    score_matrix_t2i = torch.full((len(texts),len(data_loader.dataset.image)),-100.0).to(device)
    
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0), start + step)
    
    # Compute the text-to-image score matrix
    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = image_feats[topk_idx].to(device)
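The scoring above is a two-stage ranking: candidates are first ordered by embedding similarity, and only the k_test most similar ones are re-scored with the ITM head; everything else keeps the fill value of -100 and therefore ranks last. The toy snippet below (dummy numbers, purely illustrative) shows the same topk-then-rescore pattern for a single query with four candidates:

import torch

sims = torch.tensor([0.1, 0.7, 0.3, 0.9])   # similarity of one query to 4 candidates
scores = torch.full((4,), -100.0)           # candidates that are not re-scored stay at -100
topk_sim, topk_idx = sims.topk(k=2)         # keep the 2 most similar candidates
rescored = torch.tensor([1.5, -0.2])        # stand-in for the ITM head logits of those 2
scores[topk_idx] = rescored + topk_sim      # final score = ITM logit + similarity
print(scores)                               # tensor([-100.0000, 0.5000, -100.0000, 2.4000])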



def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt):
    
    # Compute the image-to-text rank
    ranks = np.zeros(scores_i2t.shape[0])
    for index, score in enumerate(scores_i2t):
        inds = np.argsort(score)[::-1]
        # calculate rank
        rank = 1e20
        for i in img2txt[index]:
            tmp = np.where(inds == i)[0][0]
            if tmp < rank:
                rank = tmp
        ranks[index] = rank

    # Compute metrics
    tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
  
    # Calculate text-to-image rank
    ranks = np.zeros(scores_t2i.shape[0])
    
    for index, score in enumerate(scores_t2i):
        inds = np.argsort(score)[::-1]
        ranks[index] = np.where(inds == txt2img[index])[0][0]

    # Compute metrics
    ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)

    tr_mean = (tr1 + tr5 + tr10) / 3
    ir_mean = (ir1 + ir5 + ir10) / 3
    r_mean = (tr_mean + ir_mean) / 2

    # return evaluation results
    eval_result = {'txt_r1': tr1,
                    'txt_r5': tr5,
                    'txt_r10': tr10,
                    'txt_r_mean': tr_mean,
                    'img_r1': ir1,
                    'img_r5': ir5,
                    'img_r10': ir10,
                    'img_r_mean': ir_mean,
                    'r_mean': r_mean}
    return eval_result
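A quick way to see what itm_eval reports is to feed it toy score matrices. In the example below (made-up numbers, purely illustrative) there are 2 images and 4 texts, each image owning two captions; image-to-text retrieval is perfect, while one text ranks its ground-truth image second:

import numpy as np

scores_i2t = np.array([[0.9, 0.1, 0.2, 0.3],   # image 0 ranks text 0 first (correct)
                       [0.2, 0.1, 0.8, 0.7]])  # image 1 ranks text 2 first (correct)
scores_t2i = np.array([[0.9, 0.1],             # text 0 -> image 0 (correct)
                       [0.4, 0.6],             # text 1 prefers image 1 but belongs to image 0
                       [0.2, 0.8],             # text 2 -> image 1 (correct)
                       [0.3, 0.7]])            # text 3 -> image 1 (correct)
img2txt = {0: [0, 1], 1: [2, 3]}   # ground-truth caption indices per image
txt2img = {0: 0, 1: 0, 2: 1, 3: 1} # ground-truth image index per caption

print(itm_eval(scores_i2t, scores_t2i, txt2img, img2txt))
# -> txt_r1 = 100.0 and img_r1 = 75.0; r_mean averages the two directions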