if self.inprogress_constraint is not None:
    # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
    # current constraint, simply update the state.
    stepped, complete, reset = self.inprogress_constraint.update(token_id)
    if reset:
        # 1. If the next token breaks the progress, then we must restart.
        #    e.g. constraint == "I love pies" and the sequence so far is "I love", but `token_id` == "books".
        #    That doesn't mean we call self.init_state(), since we only reset the state for this particular
        #    constraint, not the full list of constraints.
        self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
        self.inprogress_constraint = None
    if complete:
        # 2. If the next token completes the constraint, move it to the completed list and set
        #    inprogress to None. If there are no pending constraints either, then this full list of
        #    constraints is complete.
        self.complete_constraints.append(self.inprogress_constraint)
        self.inprogress_constraint = None
        if len(self.pending_constraints) == 0:
            # we're done!
            self.completed = True
else:
    # Not in the middle of fulfilling a constraint, so does this `token_id` help us step towards any of
    # our remaining constraints?
    for cidx, pending_constraint in enumerate(self.pending_constraints):
        if pending_constraint.does_advance(token_id):
            stepped, complete, reset = pending_constraint.update(token_id)
            if not stepped:
                raise Exception(
                    "`constraint.update(token_id)` is not yielding incremental progress, "
                    "even though `constraint.does_advance(token_id)` is true."
                )
            if complete:
                self.complete_constraints.append(pending_constraint)
                self.inprogress_constraint = None

            if not complete and stepped:
                self.inprogress_constraint = pending_constraint

            if complete or stepped:
                # If we made any progress at all, then it's at least not a "pending constraint".
                self.pending_constraints = (
                    self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                )
                if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                    # If there's no longer any pending after this and no inprogress either, then we must be
                    # complete.
                    self.completed = True

                break  # prevent accidentally stepping through multiple constraints with just one token.

return complete, stepped

def copy(self, stateful=True):
    # We never actually modify the `self.constraints` objects throughout this process, so they are still
    # in their initialization state.
    new_state = ConstraintListState(self.constraints)
    if stateful:
        new_state.complete_constraints = [
            constraint.copy(stateful=True) for constraint in self.complete_constraints
        ]
        if self.inprogress_constraint is not None:
            new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
        new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

    return new_state
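
The state machine above is normally driven beam by beam during constrained beam search, but it can also be stepped manually. Below is a minimal sketch, assuming the update loop shown above is `ConstraintListState.add` and that `PhrasalConstraint` lives alongside it in the same module; the tokenizer checkpoint and phrase are purely illustrative, and the usual public entry point is `model.generate(constraints=..., num_beams=...)` rather than this class.

from transformers import AutoTokenizer
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative checkpoint
phrase_ids = tokenizer("I love pies", add_special_tokens=False).input_ids

# One phrasal constraint: the generated sequence must contain these token ids in order.
state = ConstraintListState([PhrasalConstraint(phrase_ids)])
for token_id in phrase_ids:
    complete, stepped = state.add(token_id)  # advances the in-progress constraint one token at a time

print(state.completed)  # True once every constraint in the list has been fulfilled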

class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)

class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
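
Both shims above only emit a FutureWarning before deferring to their parent classes, so migrating away from them is essentially an import swap. A minimal sketch, with an illustrative output directory and the model left as a placeholder:

# Deprecated:
#   from transformers.sagemaker import SageMakerTrainer, SageMakerTrainingArguments
# Replacement, with unchanged constructor signatures:
from transformers import Trainer, TrainingArguments

args = TrainingArguments(output_dir="./outputs")  # illustrative output directory
# trainer = Trainer(model=model, args=args)       # `model` would be any PreTrainedModel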

@cached_property
def _setup_devices(self) -> "torch.device":
    logger.info("PyTorch: setting up devices")
    if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
        logger.warning(
            "torch.distributed process group is initialized, but local_rank == -1. "
            "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
        )
    if self.no_cuda:
        device = torch.device("cpu")
        self._n_gpu = 0
    elif is_sagemaker_model_parallel_available():
        local_rank = smp.local_rank()
        device = torch.device("cuda", local_rank)
        self._n_gpu = 1
    elif is_sagemaker_dp_enabled():
        import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
device = torch.device("cuda", self.local_rank)
self._n_gpu = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at | 10,776 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/sagemaker/training_args_sm.py |
# the default value.
self._n_gpu = torch.cuda.device_count()
    else:
        # Here, we'll use torch.distributed.
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
        device = torch.device("cuda", self.local_rank)
        self._n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device
@property
def world_size(self):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def place_model_on_device(self):
return not is_sagemaker_model_parallel_available()
@property
def _no_sync_in_gradient_accumulation(self):
return False | 10,776 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/sagemaker/training_args_sm.py |
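
A minimal sketch of how the device setup above surfaces through the public properties, assuming a plain non-SageMaker environment (so `no_cuda=True` drives `_setup_devices` into the CPU branch) and that `SageMakerTrainingArguments` is importable from `transformers.sagemaker`:

from transformers.sagemaker import SageMakerTrainingArguments

# Emits the deprecation warning from __post_init__, then resolves the device lazily on first access.
args = SageMakerTrainingArguments(output_dir="./outputs", no_cuda=True)

print(args.device)      # torch.device("cpu"), resolved through the cached _setup_devices above
print(args.n_gpu)       # 0, since no_cuda forces the CPU branch
print(args.world_size)  # falls back to TrainingArguments.world_size outside SageMaker model parallelism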