in tensorflow_quantum/python/differentiators/differentiator.py [0:0]
def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None):
"""Generate a differentiable op by attaching self to an op.
This function returns a `tf.function` that passes values through to
the provided op during the forward pass and uses this differentiator
(`self`) to backpropagate through the op during the backward pass. If
sampled_op is provided, this differentiator's `differentiate_sampled`
method will be invoked (which requires sampled_op to be a sample based
expectation op with a num_samples input tensor). If analytic_op is
provided, this differentiator's `differentiate_analytic` method will be
invoked (which requires analytic_op to be an analytic expectation op
that does NOT have num_samples as an input). If both sampled_op and
analytic_op are provided, an exception will be raised.
***CAUTION***
`generate_differentiable_op()` can be called only ONCE per differentiator
because of the `one differentiator per op` policy. You need to call
`refresh()` before reusing this differentiator with another op.
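For example, a minimal usage sketch (assuming the standard
`tfq.get_expectation_op`, `tfq.get_sampled_expectation_op` and
`ForwardDifference` helpers are available):
>>> my_op = tfq.get_expectation_op()
>>> my_dif = tfq.differentiators.ForwardDifference()
>>> analytic_dif_op = my_dif.generate_differentiable_op(analytic_op=my_op)
>>> # Reusing this differentiator with a sample based op requires refresh():
>>> my_dif.refresh()
>>> sampled_dif_op = my_dif.generate_differentiable_op(
...     sampled_op=tfq.get_sampled_expectation_op())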
Args:
sampled_op: A `callable` op that you want to make differentiable
using this differentiator's `differentiate_sampled` method.
analytic_op: A `callable` op that you want to make differentiable
using this differentiator's `differentiate_analytic` method.
Returns:
A `callable` op whose gradients are now registered to be
a call to this differentiator's `differentiate_*` method.
"""
if hasattr(self, 'expectation_op'):
raise TypeError('This differentiator is already attached to another '
'op from a previous call to generate_differentiable_op. '
'You need to call `refresh()` to reuse this '
'differentiator with another op.')
if sampled_op is None and analytic_op is None:
raise ValueError('generate_differentiable_op requires a sample '
'based expectation op to be provided with arg '
'\'sampled_op\', or an analytically '
'calculated expectation op to be provided with '
'arg \'analytic_op\'.')
if sampled_op is not None and analytic_op is not None:
raise ValueError('generate_differentiable_op was given both a '
'sampled_op and analytic_op. '
'Please provide analytic_op if the '
'operation you wish to make differentiable is '
'analytical. Otherwise provide '
'sampled_op if the operation you want '
'to make differentiable is sample based.')
if not callable(sampled_op) and not callable(analytic_op):
raise TypeError('Provided arguments must be callable tensorflow '
'ops.')
# TODO (mbbrough): find a better workaround than this to ensure
# that the correct sample based expectation wasn't accidentally
# put inside of the analytical_op argument or vice versa.
# right now all that is checked is that the desired op signatures
# are substrings of the given op signature.
if analytic_op is not None:
signature = inspect.signature(analytic_op).parameters
expected_signature = [
'programs', 'symbol_names', 'symbol_values', 'pauli_sums'
]
for key in expected_signature:
if not any(key in s for s in signature):
raise ValueError('unexpected signature for analytic_op. '
'Missing arg: {}. '.format(str(key)) +
'The signature should contain: {}. '.format(
list(expected_signature)) +
'Given: {}. '.format(list(signature)) +
'Note: noisy ops should use sampled_op.')
if 'num_samples' in signature:
raise ValueError('found num_samples in analytic_op. Please '
'ensure that you are providing an analytical '
'expectation op in the analytic_op arg. '
'Note: noisy ops should use sampled_op.')
if sampled_op is not None:
signature = inspect.signature(sampled_op).parameters
expected_signature = [
'programs', 'symbol_names', 'symbol_values', 'pauli_sums',
'num_samples'
]
for key in expected_signature:
if not any(key in s for s in signature):
raise ValueError('unexpected signature for sampled_op. '
'Missing arg: {}. '.format(str(key)) +
'The signature should contain: {}.'.format(
list(expected_signature)))
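# Wrap analytic_op in a custom gradient so the forward pass runs
# analytic_op directly while the backward pass is routed through this
# differentiator's analytic gradient computation.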
@tf.custom_gradient
def op_wrapper_analytic(programs, symbol_names, symbol_values,
pauli_sums):
forward_pass_vals = analytic_op(programs, symbol_names,
symbol_values, pauli_sums)
def gradient(grad):
return self._differentiate_ana(programs, symbol_names,
symbol_values, pauli_sums,
forward_pass_vals, grad)
return forward_pass_vals, gradient
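# Same wiring for the sample based op: forward pass through sampled_op,
# backward pass through this differentiator's sampling based gradient.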
@tf.custom_gradient
def op_wrapper_sampled(programs, symbol_names, symbol_values,
pauli_sums, num_samples):
forward_pass_vals = sampled_op(programs, symbol_names,
symbol_values, pauli_sums,
num_samples)
def gradient(grad):
return self._differentiate_sam(programs, symbol_names,
symbol_values, pauli_sums,
num_samples, forward_pass_vals,
grad)
return forward_pass_vals, gradient
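# Record which op this differentiator is now attached to (used by the
# guard above to enforce the one-differentiator-per-op policy) and
# select the matching wrapper to return.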
self.expectation_op = analytic_op
return_func = op_wrapper_analytic
if analytic_op is None:
self.expectation_op = sampled_op
return_func = op_wrapper_sampled
return return_func