class MyModule:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, data):
        img = data['image']
        label = data['label']
        # your process code

        data['image'] = img
        data['label'] = label
        return data
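To make the hook concrete, here is a minimal sketch of a transform in the same style that resizes the image and rescales pixel values. The ResizeNormalize name, the default size, and the scaling are illustrative assumptions, not built-in PaddleOCR ops.

import cv2

class ResizeNormalize:
    def __init__(self, size=(320, 48), **kwargs):
        # target (width, height); an assumed default for illustration
        self.size = size

    def __call__(self, data):
        img = data['image']
        img = cv2.resize(img, self.size)  # cv2.resize expects (width, height)
        data['image'] = img.astype('float32') / 255.0  # rescale to [0, 1]
        return data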
# angle class data process
transforms:
  - DecodeImage: # load image
      img_mode: BGR
      channel_first: False
  - MyModule:
      args1: args1
      args2: args2
  - KeepKeys:
      keep_keys: ['image', 'label'] # dataloader will return list in this order
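Roughly speaking, the config loader passes each operator's sub-dict to the class as keyword arguments and applies the transforms in order. The following is a simplified illustration of that mapping, not PaddleOCR's actual loader code.

import numpy as np

# the YAML sub-dict {args1: args1, args2: args2} becomes keyword arguments
op = MyModule(args1='args1', args2='args2')
data = {'image': np.zeros((48, 320, 3), dtype='uint8'), 'label': 0}
data = op(data)  # every transform receives and returns the data dict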
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class MyBackbone(nn.Layer):
    def __init__(self, *args, **kwargs):
        super(MyBackbone, self).__init__()
        # your init code
        self.conv = nn.xxxx

    def forward(self, inputs):
        # your network forward
        y = self.conv(inputs)
        return y
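As a minimal concrete sketch of the skeleton above, assuming a single conv-bn-relu stage (the layer sizes are illustrative assumptions; downstream neck/head modules in PaddleOCR typically read an out_channels attribute from the backbone):

import paddle.nn as nn

class MySimpleBackbone(nn.Layer):
    def __init__(self, in_channels=3, out_channels=64, **kwargs):
        super().__init__()
        # one conv-bn-relu stage; sizes are assumptions for illustration
        self.conv = nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=2, padding=1)
        self.bn = nn.BatchNorm2D(out_channels)
        self.relu = nn.ReLU()
        self.out_channels = out_channels  # downstream modules can read this

    def forward(self, inputs):
        return self.relu(self.bn(self.conv(inputs)))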
import paddle


class MyPostProcess:
    def __init__(self, *args, **kwargs):
        # your init code
        pass

    def __call__(self, preds, label=None, *args, **kwargs):
        if isinstance(preds, paddle.Tensor):
            preds = preds.numpy()
        # your preds decode code
        preds = self.decode_preds(preds)
        if label is None:
            return preds
        # your label decode code
        label = self.decode_label(label)
        return preds, label

    def decode_preds(self, preds):
        # your preds decode code
        pass

    def decode_label(self, label):
        # your label decode code
        pass
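For example, a sketch of the two decode hooks for a text-angle classification head, assuming preds is an (N, num_classes) probability array; the label_list default and the output layout are illustrative assumptions:

class MyClsPostProcess(MyPostProcess):
    def __init__(self, label_list=None, **kwargs):
        super().__init__(**kwargs)
        self.label_list = label_list or ['0', '180']  # assumed class names

    def decode_preds(self, preds):
        # argmax over classes -> (class_name, confidence) per sample
        idxs = preds.argmax(axis=-1)
        return [(self.label_list[i], float(preds[n, i])) for n, i in enumerate(idxs)]

    def decode_label(self, label):
        # map integer class ids to class names
        return [self.label_list[int(i)] for i in label]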
import paddle
from paddle import nn


class MyLoss(nn.Layer):
    def __init__(self, **kwargs):
        super(MyLoss, self).__init__()
        # your init code, e.g. build self.loss here
        pass

    def __call__(self, predicts, batch):
        label = batch[1]
        # your loss code
        loss = self.loss(input=predicts, label=label)
        return {'loss': loss}
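As one possible instance, a sketch that wraps paddle's built-in cross-entropy in this interface; the batch layout (labels at batch[1]) follows the skeleton above, everything else is an illustrative assumption:

import paddle
from paddle import nn

class MyCrossEntropyLoss(nn.Layer):
    def __init__(self, **kwargs):
        super().__init__()
        self.loss_func = nn.CrossEntropyLoss()

    def forward(self, predicts, batch):
        label = batch[1].astype('int64')  # CrossEntropyLoss expects int64 labels
        loss = self.loss_func(predicts, label)
        return {'loss': loss}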
class MyMetric(object):
    def __init__(self, main_indicator='acc', **kwargs):
        # main_indicator is used to select the best model
        self.main_indicator = main_indicator
        self.reset()

    def __call__(self, preds, batch, *args, **kwargs):
        # preds is the output of the postprocess
        # batch is the output of the dataloader
        labels = batch[1]
        cur_correct_num = 0
        cur_all_num = 0
        # your metric code
        self.correct_num += cur_correct_num
        self.all_num += cur_all_num
        return {'acc': cur_correct_num / cur_all_num}

    def get_metric(self):
        """
        return metrics {
            'acc': 0,
            'norm_edit_dis': 0,
        }
        """
        acc = self.correct_num / self.all_num
        self.reset()
        return {'acc': acc}

    def reset(self):
        # reset metric
        self.correct_num = 0
        self.all_num = 0
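Filling in the placeholder, a sketch of the accuracy update for a classification task, assuming the postprocess returns one decoded prediction per sample:

class MyAccMetric(MyMetric):
    def __call__(self, preds, batch, *args, **kwargs):
        labels = batch[1]
        # count exact matches between decoded predictions and labels
        cur_correct_num = sum(int(p == l) for p, l in zip(preds, labels))
        cur_all_num = len(labels)
        self.correct_num += cur_correct_num
        self.all_num += cur_all_num
        return {'acc': cur_correct_num / max(cur_all_num, 1)}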
from paddle import optimizer as optim


class MyOptim(object):
    def __init__(self, learning_rate=0.001, *args, **kwargs):
        self.learning_rate = learning_rate

    def __call__(self, parameters):
        # It is recommended to wrap one of paddle's built-in optimizers
        opt = optim.XXX(
            learning_rate=self.learning_rate,
            parameters=parameters)
        return opt
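For example, a sketch that wraps paddle's built-in Adam in this interface; the extra hyperparameters and their defaults are illustrative assumptions:

from paddle import optimizer as optim

class MyAdam(object):
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, **kwargs):
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2

    def __call__(self, parameters):
        # delegate to paddle's Adam; only the config-facing wrapper is custom
        return optim.Adam(
            learning_rate=self.learning_rate,
            beta1=self.beta1,
            beta2=self.beta2,
            parameters=parameters)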