import numpy as np
import torch as t
from torch.autograd import Function


class loss(Function):
    @staticmethod
    def forward(ctx,x,INPUT):

        batch_size = x.shape[0]
        X = x.detach().numpy()
        input = INPUT.detach().numpy()
        Loss = 0
        for i in range(batch_size):
            # split INPUT[i] into the real/imaginary parts of R and H (1x4 rows each)
            t_R_r = input[i,0:4]
            R_r = t_R_r[np.newaxis,:]
            t_R_i = input[i,4:8]
            R_i = t_R_i[np.newaxis,:]
            t_H_r = input[i,8:12]
            H_r = t_H_r[np.newaxis,:]
            t_H_i = input[i,12:16]
            H_i = t_H_i[np.newaxis,:]

            # real and imaginary parts of the 4x4 matrix T
            t_T_r = input[i, 16:32]
            T_r = t_T_r.reshape(4,4)
            t_T_i = input[i, 32:48]
            T_i = t_T_i.reshape(4,4)

            R = np.concatenate((R_r, R_i), axis=1)
            H = np.concatenate((H_r, H_i), axis=1)


            # real-valued block form of the complex matrix T: [[T_r, T_i], [-T_i, T_r]]
            temp_t1 = np.concatenate((T_r,T_i),axis=1)
            temp_t2 = np.concatenate((-T_i,T_r),axis=1)
            T = np.concatenate((temp_t1,temp_t2),axis=0)
            # diagonal matrices built from the network output x
            phi_r = np.zeros((4,4))
            row, col = np.diag_indices(4)
            phi_r[row,col] = X[i,0:4]
            phi_i = np.zeros((4, 4))
            row, col = np.diag_indices(4)
            phi_i[row, col] = 1 - np.power(X[i, 0:4],2)

            temp_phi1 = np.concatenate((phi_r,phi_i),axis=1)
            temp_phi2 = np.concatenate((-phi_i, phi_r), axis=1)
            phi = np.concatenate((temp_phi1,temp_phi2),axis=0)

            temp1 = np.matmul(R,phi)

            temp2 = np.matmul(temp1,T)  # error
            H_hat = H + temp2

            # Q from x: symmetric real part, antisymmetric imaginary part
            t_Q_r = np.zeros((4,4))
            t_Q_r[np.triu_indices(4,1)] = X[i,4:10]
            Q_r = t_Q_r + t_Q_r.T
            row,col = np.diag_indices(4)
            Q_r[row,col] = X[i,10:14]
            Q_i = np.zeros((4,4))
            Q_i[np.triu_indices(4,1)] = X[i,14:20]
            Q_i = Q_i - Q_i.T

            temp_Q1 = np.concatenate((Q_r,Q_i),axis=1)
            temp_Q2 = np.concatenate((-Q_i,Q_r),axis=1)
            Q = np.concatenate((temp_Q1,temp_Q2),axis=0)

            # split H_hat into real/imaginary rows to build its conjugate transpose
            t_H_hat_r = H_hat[0,0:4]
            H_hat_r = t_H_hat_r[np.newaxis,:]
            t_H_hat_i= H_hat[0,4:8]
            H_hat_i = t_H_hat_i[np.newaxis,:]

            temp_H1 = np.concatenate((-H_hat_i.T,H_hat_r.T),axis=0)
            H_hat_H = np.concatenate((H_hat.T,temp_H1),axis=1)
            temp_result1 = np.matmul(H_hat,Q)
            temp_result2 = np.matmul(temp_result1,H_hat_H)

            Loss += np.log10(1+temp_result2[0][0])
        Loss = t.from_numpy(np.array(Loss / batch_size))
        return Loss
    @staticmethod
    def backward(ctx,grad_output):
        print('gradient')
        return grad_output
def criterion(output,input):
    return loss.apply(output,input)

This is my loss function, but it produces the following error:

Traceback (most recent call last):
  File "/Users/mrfang/channel_capacity/training.py", line 24, in <module>
    loss.backward()
  File "/Users/mrfang/anaconda3/lib/python3.6/site-packages/torch/tensor.py", line 150, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/Users/mrfang/anaconda3/lib/python3.6/site-packages/torch/autograd/__init__.py", line 99, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: function lossBackward returned an incorrect number of gradients (expected 2, got 1)

How can I fix it? Thanks very much.


1 Answer


Your forward(ctx, x, INPUT) takes two inputs, x and INPUT, so backward must return two gradients as well: grad_x and grad_INPUT.

Besides that, your snippet doesn't actually compute a custom gradient, so you could simply let PyTorch's autograd do the differentiation, without defining a special Function at all.
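For instance, a minimal sketch of that autograd route (the loop body below is only a placeholder, not your full channel-capacity computation):

import torch

def criterion(output, INPUT):
    # with torch ops only (no .detach() / .numpy()), autograd builds the backward pass itself
    batch_size = output.shape[0]
    total = output.new_zeros(())
    for i in range(batch_size):
        # ... rebuild phi, T, Q and H_hat here with torch.cat / torch.matmul
        #     instead of np.concatenate / np.matmul ...
        val = (output[i] ** 2).sum()  # placeholder standing in for temp_result2[0][0]
        total = total + torch.log10(1 + val)
    return total / batch_size

Calling loss_value = criterion(net_output, INPUT) and then loss_value.backward() propagates gradients through net_output with no custom backward at all.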

If this is working code and you do want to keep the custom loss, here's a quick boilerplate of what forward and backward should comprise:

@staticmethod
def forward(ctx, x, INPUT):
    # this is required so they're available during the backwards call
    ctx.save_for_backward(x, INPUT)

    # custom forward computation goes here; return the loss tensor

@staticmethod
def backward(ctx, grad_output):
    x, INPUT = ctx.saved_tensors
    grad_x = grad_INPUT = None

    # compute grad_x and grad_INPUT here

    return grad_x, grad_INPUT

You don't need to return gradients for inputs that don't require them, so you can return None for those.
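In practice the usual guard for that is ctx.needs_input_grad, a tuple of booleans with one entry per forward input. A sketch (the gradient expression is a placeholder, not the real derivative of your loss):

@staticmethod
def backward(ctx, grad_output):
    x, INPUT = ctx.saved_tensors
    grad_x = grad_INPUT = None
    if ctx.needs_input_grad[0]:
        # placeholder: replace with the actual d(loss)/dx
        grad_x = grad_output.expand_as(x)
    # INPUT is plain data here, so grad_INPUT stays None
    return grad_x, grad_INPUT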

More info in the PyTorch documentation on extending torch.autograd with custom Functions.