from onnxruntime import InferenceSession

# Load the DocTr++ dewarping model on GPU 0 via the CUDA execution provider.
DOC_TR = InferenceSession(
    "model/dewarp_model/doc_tr_pp.onnx",
    providers=["CUDAExecutionProvider"],
    provider_options=[{"device_id": 0}],
)
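
# A minimal usage sketch (not from the repository): feed an image tensor to the
# loaded session. The input name is queried from the model itself; the
# (1, 3, 288, 288) float32 NCHW shape is an assumption about doc_tr_pp.onnx, as
# is treating the first returned array as the dewarping output.
import numpy as np

_input_name = DOC_TR.get_inputs()[0].name              # actual input name reported by the model
_dummy = np.zeros((1, 3, 288, 288), dtype=np.float32)  # assumed NCHW float32 input
_outputs = DOC_TR.run(None, {_input_name: _dummy})     # returns a list of output arrays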