in pkg/controllers/job/job_controller_util.go [43:149]
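// createJobPod constructs a Pod for the given Job from the task's PodTemplateSpec:
// it sets the owner reference, scheduler name, and Job-level volumes, and attaches
// the annotations and labels used by the scheduler and controllers.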
func createJobPod(job *batch.Job, template *v1.PodTemplateSpec, topologyPolicy batch.NumaPolicy, ix int, jobForwarding bool) *v1.Pod {
	templateCopy := template.DeepCopy()

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jobhelpers.MakePodName(job.Name, template.Name, ix),
			Namespace: job.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(job, helpers.JobKind),
			},
			Labels:      templateCopy.Labels,
			Annotations: templateCopy.Annotations,
		},
		Spec: templateCopy.Spec,
	}

	// If no scheduler name is set in the Pod template, use the scheduler name from the Job.
	if len(pod.Spec.SchedulerName) == 0 {
		pod.Spec.SchedulerName = job.Spec.SchedulerName
	}
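
	// Translate each Job-level volume into a PVC-backed Pod volume and mount it
	// into every container; a claim name that appears more than once is only added once.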
	volumeMap := make(map[string]string)
	for _, volume := range job.Spec.Volumes {
		vcName := volume.VolumeClaimName
		name := fmt.Sprintf("%s-%s", job.Name, jobhelpers.GenRandomStr(12))
		if _, ok := volumeMap[vcName]; !ok {
			volume := v1.Volume{
				Name: name,
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
						ClaimName: vcName,
					},
				},
			}
			pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
			volumeMap[vcName] = name
		} else {
			// Duplicate volume claim names should be prevented by validation; skip them here.
			continue
		}

		for i, c := range pod.Spec.Containers {
			vm := v1.VolumeMount{
				MountPath: volume.MountPath,
				Name:      name,
			}

			pod.Spec.Containers[i].VolumeMounts = append(c.VolumeMounts, vm)
		}
	}
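
	// Use the template name as the task key, falling back to the default task spec
	// when the template is unnamed.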
	tsKey := templateCopy.Name
	if len(tsKey) == 0 {
		tsKey = batch.DefaultTaskSpec
	}

	if len(pod.Annotations) == 0 {
		pod.Annotations = make(map[string]string)
	}
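
	// Annotate the Pod so the scheduler and controllers can map it back to its
	// task, PodGroup, Job, queue, and Job version.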
	pod.Annotations[batch.TaskSpecKey] = tsKey
	pod.Annotations[schedulingv2.KubeGroupNameAnnotationKey] = job.Name
	pod.Annotations[batch.JobNameKey] = job.Name
	pod.Annotations[batch.QueueNameKey] = job.Spec.Queue
	pod.Annotations[batch.JobVersion] = fmt.Sprintf("%d", job.Status.Version)
	pod.Annotations[batch.PodTemplateKey] = fmt.Sprintf("%s-%s", job.Name, template.Name)

	if topologyPolicy != "" {
		pod.Annotations[schedulingv2.NumaPolicyKey] = string(topologyPolicy)
	}
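
	// Propagate scheduling-related annotations from the Job to the Pod:
	// pod preemptability, revocable zone, and JDB min-available / max-unavailable.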
	if len(job.Annotations) > 0 {
		if value, found := job.Annotations[schedulingv2.PodPreemptable]; found {
			pod.Annotations[schedulingv2.PodPreemptable] = value
		}
		if value, found := job.Annotations[schedulingv2.RevocableZone]; found {
			pod.Annotations[schedulingv2.RevocableZone] = value
		}

		if value, found := job.Annotations[schedulingv2.JDBMinAvailable]; found {
			pod.Annotations[schedulingv2.JDBMinAvailable] = value
		} else if value, found := job.Annotations[schedulingv2.JDBMaxUnavailable]; found {
			pod.Annotations[schedulingv2.JDBMaxUnavailable] = value
		}
	}

	if len(pod.Labels) == 0 {
		pod.Labels = make(map[string]string)
	}

	// Set pod labels for Service.
	pod.Labels[batch.JobNameKey] = job.Name
	pod.Labels[batch.TaskSpecKey] = tsKey
	pod.Labels[batch.JobNamespaceKey] = job.Namespace
	pod.Labels[batch.QueueNameKey] = job.Spec.Queue
	if len(job.Labels) > 0 {
		if value, found := job.Labels[schedulingv2.PodPreemptable]; found {
			pod.Labels[schedulingv2.PodPreemptable] = value
		}
	}
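
	// When job forwarding is enabled, mark the Pod with the forwarding annotation and label.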
	if jobForwarding {
		pod.Annotations[batch.JobForwardingKey] = "true"
		pod.Labels[batch.JobForwardingKey] = "true"
	}

	return pod
}