#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will meet their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
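
/*
 * Illustrative sketch (not part of this header): how userspace might fill
 * in the structure above to request SCHED_DEADLINE.  There is no glibc
 * wrapper for sched_setattr(), so the raw syscall is used; the program is
 * assumed to have a matching struct sched_attr definition plus
 * <unistd.h>, <sys/syscall.h> and <stdio.h>.  The 10ms-every-30ms
 * reservation is an arbitrary example value.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10 ms, in ns
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30 ms, in ns
 *		.sched_period	= 30 * 1000 * 1000,	// 30 ms, in ns
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))	// pid 0 == current task
 *		perror("sched_setattr");
 */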

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
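
/*
 * Worked example (illustrative, assuming the defaults above): with
 * FSHIFT = 11, FIXED_1 = 2048 and EXP_1 = 1884, one CALC_LOAD() step for
 * the 1-minute average is equivalent to
 *
 *	load = (load * 1884 + n * (2048 - 1884)) >> 11;
 *
 * where callers such as calc_global_load() typically pass n as the active
 * task count already scaled by FIXED_1.  Starting from load == 0 with one
 * runnable task (n == 2048):
 *
 *	load = (0 * 1884 + 2048 * 164) >> 11 = 164	// ~0.08 in fixed-point
 *
 * so the average decays towards the instantaneous value with a 1-minute
 * time constant, one step every LOAD_FREQ ticks (about 5 seconds).
 */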

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void update_cpu_load_nohz(void);
#else
static inline void update_cpu_load_nohz(void) { }
#endif

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_STATE_MAX		2048

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		(tsk)->state = (state_value);			\
	} while (0)
#define set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		smp_store_mb((tsk)->state, (state_value));	\
	} while (0)

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif
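
/*
 * Illustrative sketch (not part of this header): the canonical wait loop
 * built on the helpers above.  Because wakeups may be spurious, the
 * condition is re-checked after every schedule();
 * condition_i_am_waiting_for() is a hypothetical predicate.
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition_i_am_waiting_for())
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */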

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern long io_schedule_timeout(long timeout);

static inline void io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
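
/*
 * Illustrative sketch (not part of this header): per the comment above,
 * privilege-transition checks must compare against SUID_DUMP_USER rather
 * than treat the value as a boolean.  mm_is_user_dumpable() is a
 * hypothetical helper, for illustration only:
 *
 *	static bool mm_is_user_dumpable(struct mm_struct *mm)
 *	{
 *		return get_dumpable(mm) == SUID_DUMP_USER;
 *	}
 *
 * A plain "if (get_dumpable(mm))" would also accept SUID_DUMP_ROOT,
 * which is a different privilege state.
 */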

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	cputime_t utime;
	cputime_t stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
0569: */ 0570: struct task_cputime { 0571: cputime_t utime; 0572: cputime_t stime; 0573: unsigned long long sum_exec_runtime; 0574: }; 0575: 0576: /* Alternate field names when used to cache expirations. */ 0577: #define virt_exp utime 0578: #define prof_exp stime 0579: #define sched_exp sum_exec_runtime 0580: 0581: #define INIT_CPUTIME \ 0582: (struct task_cputime) { \ 0583: .utime = 0, \ 0584: .stime = 0, \ 0585: .sum_exec_runtime = 0, \ 0586: } 0587: 0588: /* 0589: * This is the atomic variant of task_cputime, which can be used for 0590: * storing and updating task_cputime statistics without locking. 0591: */ 0592: struct task_cputime_atomic { 0593: atomic64_t utime; 0594: atomic64_t stime; 0595: atomic64_t sum_exec_runtime; 0596: }; 0597: 0598: #define INIT_CPUTIME_ATOMIC \ 0599: (struct task_cputime_atomic) { \ 0600: .utime = ATOMIC64_INIT(0), \ 0601: .stime = ATOMIC64_INIT(0), \ 0602: .sum_exec_runtime = ATOMIC64_INIT(0), \ 0603: } 0604: 0605: #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) 0606: 0607: /* 0608: * Disable preemption until the scheduler is running -- use an unconditional 0609: * value so that it also works on !PREEMPT_COUNT kernels. 0610: * 0611: * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). 0612: */ 0613: #define INIT_PREEMPT_COUNT PREEMPT_OFFSET 0614: 0615: /* 0616: * Initial preempt_count value; reflects the preempt_count schedule invariant 0617: * which states that during context switches: 0618: * 0619: * preempt_count() == 2*PREEMPT_DISABLE_OFFSET 0620: * 0621: * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. 0622: * Note: See finish_task_switch(). 0623: */ 0624: #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) 0625: 0626: /** 0627: * struct thread_group_cputimer - thread group interval timer counts 0628: * @cputime_atomic: atomic thread group interval timers. 0629: * @running: true when there are timers running and 0630: * @cputime_atomic receives updates. 0631: * @checking_timer: true when a thread in the group is in the 0632: * process of checking for thread group timers. 0633: * 0634: * This structure contains the version of task_cputime, above, that is 0635: * used for thread group CPU timer calculations. 0636: */ 0637: struct thread_group_cputimer { 0638: struct task_cputime_atomic cputime_atomic; 0639: bool running; 0640: bool checking_timer; 0641: }; 0642: 0643: #include <linux/rwsem.h> 0644: struct autogroup; 0645: 0646: /* 0647: * NOTE! "signal_struct" does not have its own 0648: * locking, because a shared signal_struct always 0649: * implies a shared sighand_struct, so locking 0650: * sighand_struct is always a proper superset of 0651: * the locking of signal_struct. 0652: */ 0653: struct signal_struct { 0654: atomic_t sigcnt; 0655: atomic_t live; 0656: int nr_threads; 0657: struct list_head thread_head; 0658: 0659: wait_queue_head_t wait_chldexit; /* for wait4() */ 0660: 0661: /* current thread group signal load-balancing target: */ 0662: struct task_struct *curr_target; 0663: 0664: /* shared signal handling: */ 0665: struct sigpending shared_pending; 0666: 0667: /* thread group exit support */ 0668: int group_exit_code; 0669: /* overloaded: 0670: * - notify group_exit_task when ->count is equal to notify_count 0671: * - everyone except group_exit_task is stopped during signal delivery 0672: * of fatal signals, group_exit_task processes the signal. 
 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
0763: */ 0764: struct rlimit rlim[RLIM_NLIMITS]; 0765: 0766: #ifdef CONFIG_BSD_PROCESS_ACCT 0767: struct pacct_struct pacct; /* per-process accounting information */ 0768: #endif 0769: #ifdef CONFIG_TASKSTATS 0770: struct taskstats *stats; 0771: #endif 0772: #ifdef CONFIG_AUDIT 0773: unsigned audit_tty; 0774: unsigned audit_tty_log_passwd; 0775: struct tty_audit_buf *tty_audit_buf; 0776: #endif 0777: 0778: oom_flags_t oom_flags; 0779: short oom_score_adj; /* OOM kill score adjustment */ 0780: short oom_score_adj_min; /* OOM kill score adjustment min value. 0781: * Only settable by CAP_SYS_RESOURCE. */ 0782: 0783: struct mutex cred_guard_mutex; /* guard against foreign influences on 0784: * credential calculations 0785: * (notably. ptrace) */ 0786: }; 0787: 0788: /* 0789: * Bits in flags field of signal_struct. 0790: */ 0791: #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ 0792: #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ 0793: #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ 0794: #define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ 0795: /* 0796: * Pending notifications to parent. 0797: */ 0798: #define SIGNAL_CLD_STOPPED 0x00000010 0799: #define SIGNAL_CLD_CONTINUED 0x00000020 0800: #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) 0801: 0802: #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ 0803: 0804: #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ 0805: SIGNAL_STOP_CONTINUED) 0806: 0807: static inline void signal_set_stop_flags(struct signal_struct *sig, 0808: unsigned int flags) 0809: { 0810: WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); 0811: sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; 0812: } 0813: 0814: /* If true, all threads except ->group_exit_task have pending SIGKILL */ 0815: static inline int signal_group_exit(const struct signal_struct *sig) 0816: { 0817: return (sig->flags & SIGNAL_GROUP_EXIT) || 0818: (sig->group_exit_task != NULL); 0819: } 0820: 0821: /* 0822: * Some day this will be a full-fledged user tracking system.. 0823: */ 0824: struct user_struct { 0825: atomic_t __count; /* reference count */ 0826: atomic_t processes; /* How many processes does this user have? */ 0827: atomic_t sigpending; /* How many pending signals does this user have? */ 0828: #ifdef CONFIG_INOTIFY_USER 0829: atomic_t inotify_watches; /* How many inotify watches does this user have? */ 0830: atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ 0831: #endif 0832: #ifdef CONFIG_FANOTIFY 0833: atomic_t fanotify_listeners; 0834: #endif 0835: #ifdef CONFIG_EPOLL 0836: atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ 0837: #endif 0838: #ifdef CONFIG_POSIX_MQUEUE 0839: /* protected by mq_lock */ 0840: unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ 0841: #endif 0842: unsigned long locked_shm; /* How many pages of mlocked shm ? 
 */
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
	atomic_long_t pipe_bufs;	/* how many pages are allocated in pipe buffers */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock has been
 * released.
 *
 * We hold a reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has in fact occurred.
 */
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define WAKE_Q(name)					\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
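
/*
 * Illustrative sketch (not part of this header): typical wake-queue usage
 * as described above -- queue tasks while a lock is held, then issue the
 * wakeups after dropping it.  "lock" (a spinlock_t) and pick_waiter()
 * (returning a struct task_struct *) are hypothetical stand-ins:
 *
 *	WAKE_Q(wake_q);
 *
 *	spin_lock(&lock);
 *	wake_q_add(&wake_q, pick_waiter());
 *	spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);
 */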

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain
*child; /* bottom domain must be null terminated */ 1042: struct sched_group *groups; /* the balancing groups of the domain */ 1043: unsigned long min_interval; /* Minimum balance interval ms */ 1044: unsigned long max_interval; /* Maximum balance interval ms */ 1045: unsigned int busy_factor; /* less balancing by factor if busy */ 1046: unsigned int imbalance_pct; /* No balance until over watermark */ 1047: unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ 1048: unsigned int busy_idx; 1049: unsigned int idle_idx; 1050: unsigned int newidle_idx; 1051: unsigned int wake_idx; 1052: unsigned int forkexec_idx; 1053: unsigned int smt_gain; 1054: 1055: int nohz_idle; /* NOHZ IDLE status */ 1056: int flags; /* See SD_* */ 1057: int level; 1058: 1059: /* Runtime fields. */ 1060: unsigned long last_balance; /* init to jiffies. units in jiffies */ 1061: unsigned int balance_interval; /* initialise to 1. units in ms. */ 1062: unsigned int nr_balance_failed; /* initialise to 0 */ 1063: 1064: /* idle_balance() stats */ 1065: u64 max_newidle_lb_cost; 1066: unsigned long next_decay_max_lb_cost; 1067: 1068: #ifdef CONFIG_SCHEDSTATS 1069: /* load_balance() stats */ 1070: unsigned int lb_count[CPU_MAX_IDLE_TYPES]; 1071: unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; 1072: unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; 1073: unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; 1074: unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; 1075: unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; 1076: unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; 1077: unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; 1078: 1079: /* Active load balancing */ 1080: unsigned int alb_count; 1081: unsigned int alb_failed; 1082: unsigned int alb_pushed; 1083: 1084: /* SD_BALANCE_EXEC stats */ 1085: unsigned int sbe_count; 1086: unsigned int sbe_balanced; 1087: unsigned int sbe_pushed; 1088: 1089: /* SD_BALANCE_FORK stats */ 1090: unsigned int sbf_count; 1091: unsigned int sbf_balanced; 1092: unsigned int sbf_pushed; 1093: 1094: /* try_to_wake_up() stats */ 1095: unsigned int ttwu_wake_remote; 1096: unsigned int ttwu_move_affine; 1097: unsigned int ttwu_move_balance; 1098: #endif 1099: #ifdef CONFIG_SCHED_DEBUG 1100: char *name; 1101: #endif 1102: union { 1103: void *private; /* used during construction */ 1104: struct rcu_head rcu; /* used during destruction */ 1105: }; 1106: 1107: unsigned int span_weight; 1108: /* 1109: * Span of all CPUs in this domain. 1110: * 1111: * NOTE: this field is variable length. (Allocated dynamically 1112: * by attaching extra space to the end of the structure, 1113: * depending on how many CPUs the kernel has booted up with) 1114: */ 1115: unsigned long span[0]; 1116: }; 1117: 1118: static inline struct cpumask *sched_domain_span(struct sched_domain *sd) 1119: { 1120: return to_cpumask(sd->span); 1121: } 1122: 1123: extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1124: struct sched_domain_attr *dattr_new); 1125: 1126: /* Allocate an array of sched domains, for partition_sched_domains(). 
*/ 1127: cpumask_var_t *alloc_sched_domains(unsigned int ndoms); 1128: void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); 1129: 1130: bool cpus_share_cache(int this_cpu, int that_cpu); 1131: 1132: typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 1133: typedef int (*sched_domain_flags_f)(void); 1134: 1135: #define SDTL_OVERLAP 0x01 1136: 1137: struct sd_data { 1138: struct sched_domain **__percpu sd; 1139: struct sched_group **__percpu sg; 1140: struct sched_group_capacity **__percpu sgc; 1141: }; 1142: 1143: struct sched_domain_topology_level { 1144: sched_domain_mask_f mask; 1145: sched_domain_flags_f sd_flags; 1146: int flags; 1147: int numa_level; 1148: struct sd_data data; 1149: #ifdef CONFIG_SCHED_DEBUG 1150: char *name; 1151: #endif 1152: }; 1153: 1154: extern void set_sched_topology(struct sched_domain_topology_level *tl); 1155: extern void wake_up_if_idle(int cpu); 1156: 1157: #ifdef CONFIG_SCHED_DEBUG 1158: # define SD_INIT_NAME(type) .name = #type 1159: #else 1160: # define SD_INIT_NAME(type) 1161: #endif 1162: 1163: #else /* CONFIG_SMP */ 1164: 1165: struct sched_domain_attr; 1166: 1167: static inline void 1168: partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1169: struct sched_domain_attr *dattr_new) 1170: { 1171: } 1172: 1173: static inline bool cpus_share_cache(int this_cpu, int that_cpu) 1174: { 1175: return true; 1176: } 1177: 1178: #endif /* !CONFIG_SMP */ 1179: 1180: 1181: struct io_context; /* See blkdev.h */ 1182: 1183: 1184: #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 1185: extern void prefetch_stack(struct task_struct *t); 1186: #else 1187: static inline void prefetch_stack(struct task_struct *t) { } 1188: #endif 1189: 1190: struct audit_context; /* See audit.c */ 1191: struct mempolicy; 1192: struct pipe_inode_info; 1193: struct uts_namespace; 1194: 1195: struct load_weight { 1196: unsigned long weight; 1197: u32 inv_weight; 1198: }; 1199: 1200: /* 1201: * The load_avg/util_avg accumulates an infinite geometric series. 1202: * 1) load_avg factors frequency scaling into the amount of time that a 1203: * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the 1204: * aggregated such weights of all runnable and blocked sched_entities. 1205: * 2) util_avg factors frequency and cpu scaling into the amount of time 1206: * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. 1207: * For cfs_rq, it is the aggregated such times of all runnable and 1208: * blocked sched_entities. 
 * The 64 bit load_sum can:
 * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
 * the highest weight (=88761) always runnable, we should not overflow
 * 2) for entity, support any load.weight always runnable
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/* Per entity load average tracking */
	struct sched_avg	avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_period		*/
	u64 dl_density;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
1322: */ 1323: s64 runtime; /* remaining runtime for this instance */ 1324: u64 deadline; /* absolute deadline for this instance */ 1325: unsigned int flags; /* specifying the scheduler behaviour */ 1326: 1327: /* 1328: * Some bool flags: 1329: * 1330: * @dl_throttled tells if we exhausted the runtime. If so, the 1331: * task has to wait for a replenishment to be performed at the 1332: * next firing of dl_timer. 1333: * 1334: * @dl_new tells if a new instance arrived. If so we must 1335: * start executing it with full runtime and reset its absolute 1336: * deadline; 1337: * 1338: * @dl_boosted tells if we are boosted due to DI. If so we are 1339: * outside bandwidth enforcement mechanism (but only until we 1340: * exit the critical section); 1341: * 1342: * @dl_yielded tells if task gave up the cpu before consuming 1343: * all its available runtime during the last job. 1344: */ 1345: int dl_throttled, dl_new, dl_boosted, dl_yielded; 1346: 1347: /* 1348: * Bandwidth enforcement timer. Each -deadline task has its 1349: * own bandwidth to be enforced, thus we need one timer per task. 1350: */ 1351: struct hrtimer dl_timer; 1352: }; 1353: 1354: union rcu_special { 1355: struct { 1356: u8 blocked; 1357: u8 need_qs; 1358: u8 exp_need_qs; 1359: u8 pad; /* Otherwise the compiler can store garbage here. */ 1360: } b; /* Bits. */ 1361: u32 s; /* Set of bits. */ 1362: }; 1363: struct rcu_node; 1364: 1365: enum perf_event_task_context { 1366: perf_invalid_context = -1, 1367: perf_hw_context = 0, 1368: perf_sw_context, 1369: perf_nr_task_contexts, 1370: }; 1371: 1372: /* Track pages that require TLB flushes */ 1373: struct tlbflush_unmap_batch { 1374: /* 1375: * Each bit set is a CPU that potentially has a TLB entry for one of 1376: * the PFNs being flushed. See set_tlb_ubc_flush_pending(). 1377: */ 1378: struct cpumask cpumask; 1379: 1380: /* True if any bit in cpumask is set */ 1381: bool flush_required; 1382: 1383: /* 1384: * If true then the PTE was dirty when unmapped. The entry must be 1385: * flushed before IO is initiated or a stale TLB entry potentially 1386: * allows an update without redirtying the page. 
1387: */ 1388: bool writable; 1389: }; 1390: 1391: struct task_struct { 1392: volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1393: void *stack; 1394: atomic_t usage; 1395: unsigned int flags; /* per process flags, defined below */ 1396: unsigned int ptrace; 1397: 1398: #ifdef CONFIG_SMP 1399: struct llist_node wake_entry; 1400: int on_cpu; 1401: unsigned int wakee_flips; 1402: unsigned long wakee_flip_decay_ts; 1403: struct task_struct *last_wakee; 1404: 1405: int wake_cpu; 1406: #endif 1407: int on_rq; 1408: 1409: int prio, static_prio, normal_prio; 1410: unsigned int rt_priority; 1411: const struct sched_class *sched_class; 1412: struct sched_entity se; 1413: struct sched_rt_entity rt; 1414: #ifdef CONFIG_CGROUP_SCHED 1415: struct task_group *sched_task_group; 1416: #endif 1417: struct sched_dl_entity dl; 1418: 1419: #ifdef CONFIG_PREEMPT_NOTIFIERS 1420: /* list of struct preempt_notifier: */ 1421: struct hlist_head preempt_notifiers; 1422: #endif 1423: 1424: #ifdef CONFIG_BLK_DEV_IO_TRACE 1425: unsigned int btrace_seq; 1426: #endif 1427: 1428: unsigned int policy; 1429: int nr_cpus_allowed; 1430: cpumask_t cpus_allowed; 1431: 1432: #ifdef CONFIG_PREEMPT_RCU 1433: int rcu_read_lock_nesting; 1434: union rcu_special rcu_read_unlock_special; 1435: struct list_head rcu_node_entry; 1436: struct rcu_node *rcu_blocked_node; 1437: #endif /* #ifdef CONFIG_PREEMPT_RCU */ 1438: #ifdef CONFIG_TASKS_RCU 1439: unsigned long rcu_tasks_nvcsw; 1440: bool rcu_tasks_holdout; 1441: struct list_head rcu_tasks_holdout_list; 1442: int rcu_tasks_idle_cpu; 1443: #endif /* #ifdef CONFIG_TASKS_RCU */ 1444: 1445: #ifdef CONFIG_SCHED_INFO 1446: struct sched_info sched_info; 1447: #endif 1448: 1449: struct list_head tasks; 1450: #ifdef CONFIG_SMP 1451: struct plist_node pushable_tasks; 1452: struct rb_node pushable_dl_tasks; 1453: #endif 1454: 1455: struct mm_struct *mm, *active_mm; 1456: /* per-thread vma caching */ 1457: u32 vmacache_seqnum; 1458: struct vm_area_struct *vmacache[VMACACHE_SIZE]; 1459: #if defined(SPLIT_RSS_COUNTING) 1460: struct task_rss_stat rss_stat; 1461: #endif 1462: /* task state */ 1463: int exit_state; 1464: int exit_code, exit_signal; 1465: int pdeath_signal; /* The signal sent when the parent dies */ 1466: unsigned long jobctl; /* JOBCTL_*, siglock protected */ 1467: 1468: /* Used for emulating ABI behavior of previous Linux versions */ 1469: unsigned int personality; 1470: 1471: /* scheduler bits, serialized by scheduler locks */ 1472: unsigned sched_reset_on_fork:1; 1473: unsigned sched_contributes_to_load:1; 1474: unsigned sched_migrated:1; 1475: unsigned :0; /* force alignment to the next boundary */ 1476: 1477: /* unserialized, strictly 'current' */ 1478: unsigned in_execve:1; /* bit to tell LSMs we're in execve */ 1479: unsigned in_iowait:1; 1480: #ifdef CONFIG_MEMCG 1481: unsigned memcg_may_oom:1; 1482: #endif 1483: #ifdef CONFIG_MEMCG_KMEM 1484: unsigned memcg_kmem_skip_account:1; 1485: #endif 1486: #ifdef CONFIG_COMPAT_BRK 1487: unsigned brk_randomized:1; 1488: #endif 1489: #ifdef CONFIG_CGROUPS 1490: /* disallow userland-initiated cgroup migration */ 1491: unsigned no_cgroup_migration:1; 1492: #endif 1493: 1494: unsigned long atomic_flags; /* Flags needing atomic access. 
*/ 1495: 1496: struct restart_block restart_block; 1497: 1498: pid_t pid; 1499: pid_t tgid; 1500: 1501: #ifdef CONFIG_CC_STACKPROTECTOR 1502: /* Canary value for the -fstack-protector gcc feature */ 1503: unsigned long stack_canary; 1504: #endif 1505: /* 1506: * pointers to (original) parent process, youngest child, younger sibling, 1507: * older sibling, respectively. (p->father can be replaced with 1508: * p->real_parent->pid) 1509: */ 1510: struct task_struct __rcu *real_parent; /* real parent process */ 1511: struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ 1512: /* 1513: * children/sibling forms the list of my natural children 1514: */ 1515: struct list_head children; /* list of my children */ 1516: struct list_head sibling; /* linkage in my parent's children list */ 1517: struct task_struct *group_leader; /* threadgroup leader */ 1518: 1519: /* 1520: * ptraced is the list of tasks this task is using ptrace on. 1521: * This includes both natural children and PTRACE_ATTACH targets. 1522: * p->ptrace_entry is p's link on the p->parent->ptraced list. 1523: */ 1524: struct list_head ptraced; 1525: struct list_head ptrace_entry; 1526: 1527: /* PID/PID hash table linkage. */ 1528: struct pid_link pids[PIDTYPE_MAX]; 1529: struct list_head thread_group; 1530: struct list_head thread_node; 1531: 1532: struct completion *vfork_done; /* for vfork() */ 1533: int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 1534: int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 1535: 1536: cputime_t utime, stime, utimescaled, stimescaled; 1537: cputime_t gtime; 1538: struct prev_cputime prev_cputime; 1539: #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1540: seqlock_t vtime_seqlock; 1541: unsigned long long vtime_snap; 1542: enum { 1543: VTIME_SLEEPING = 0, 1544: VTIME_USER, 1545: VTIME_SYS, 1546: } vtime_snap_whence; 1547: #endif 1548: unsigned long nvcsw, nivcsw; /* context switch counts */ 1549: u64 start_time; /* monotonic time in nsec */ 1550: u64 real_start_time; /* boot based time in nsec */ 1551: /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1552: unsigned long min_flt, maj_flt; 1553: 1554: struct task_cputime cputime_expires; 1555: struct list_head cpu_timers[3]; 1556: 1557: /* process credentials */ 1558: const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ 1559: const struct cred __rcu *real_cred; /* objective and real subjective task 1560: * credentials (COW) */ 1561: const struct cred __rcu *cred; /* effective (overridable) subjective task 1562: * credentials (COW) */ 1563: char comm[TASK_COMM_LEN]; /* executable name excluding path 1564: - access with [gs]et_task_comm (which lock 1565: it with task_lock()) 1566: - initialized normally by setup_new_exec */ 1567: /* file system info */ 1568: struct nameidata *nameidata; 1569: #ifdef CONFIG_SYSVIPC 1570: /* ipc stuff */ 1571: struct sysv_sem sysvsem; 1572: struct sysv_shm sysvshm; 1573: #endif 1574: #ifdef CONFIG_DETECT_HUNG_TASK 1575: /* hung task detection */ 1576: unsigned long last_switch_count; 1577: #endif 1578: /* filesystem information */ 1579: struct fs_struct *fs; 1580: /* open file information */ 1581: struct files_struct *files; 1582: /* namespaces */ 1583: struct nsproxy *nsproxy; 1584: /* signal handlers */ 1585: struct signal_struct *signal; 1586: struct sighand_struct *sighand; 1587: 1588: sigset_t blocked, real_blocked; 1589: sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ 1590: struct sigpending pending; 
1591: 1592: unsigned long sas_ss_sp; 1593: size_t sas_ss_size; 1594: 1595: struct callback_head *task_works; 1596: 1597: struct audit_context *audit_context; 1598: #ifdef CONFIG_AUDITSYSCALL 1599: kuid_t loginuid; 1600: unsigned int sessionid; 1601: #endif 1602: struct seccomp seccomp; 1603: 1604: /* Thread group tracking */ 1605: u32 parent_exec_id; 1606: u32 self_exec_id; 1607: /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, 1608: * mempolicy */ 1609: spinlock_t alloc_lock; 1610: 1611: /* Protection of the PI data structures: */ 1612: raw_spinlock_t pi_lock; 1613: 1614: struct wake_q_node wake_q; 1615: 1616: #ifdef CONFIG_RT_MUTEXES 1617: /* PI waiters blocked on a rt_mutex held by this task */ 1618: struct rb_root pi_waiters; 1619: struct rb_node *pi_waiters_leftmost; 1620: /* Deadlock detection and priority inheritance handling */ 1621: struct rt_mutex_waiter *pi_blocked_on; 1622: #endif 1623: 1624: #ifdef CONFIG_DEBUG_MUTEXES 1625: /* mutex deadlock detection */ 1626: struct mutex_waiter *blocked_on; 1627: #endif 1628: #ifdef CONFIG_TRACE_IRQFLAGS 1629: unsigned int irq_events; 1630: unsigned long hardirq_enable_ip; 1631: unsigned long hardirq_disable_ip; 1632: unsigned int hardirq_enable_event; 1633: unsigned int hardirq_disable_event; 1634: int hardirqs_enabled; 1635: int hardirq_context; 1636: unsigned long softirq_disable_ip; 1637: unsigned long softirq_enable_ip; 1638: unsigned int softirq_disable_event; 1639: unsigned int softirq_enable_event; 1640: int softirqs_enabled; 1641: int softirq_context; 1642: #endif 1643: #ifdef CONFIG_LOCKDEP 1644: # define MAX_LOCK_DEPTH 48UL 1645: u64 curr_chain_key; 1646: int lockdep_depth; 1647: unsigned int lockdep_recursion; 1648: struct held_lock held_locks[MAX_LOCK_DEPTH]; 1649: gfp_t lockdep_reclaim_gfp; 1650: #endif 1651: 1652: /* journalling filesystem info */ 1653: void *journal_info; 1654: 1655: /* stacked block device info */ 1656: struct bio_list *bio_list; 1657: 1658: #ifdef CONFIG_BLOCK 1659: /* stack plugging */ 1660: struct blk_plug *plug; 1661: #endif 1662: 1663: /* VM state */ 1664: struct reclaim_state *reclaim_state; 1665: 1666: struct backing_dev_info *backing_dev_info; 1667: 1668: struct io_context *io_context; 1669: 1670: unsigned long ptrace_message; 1671: siginfo_t *last_siginfo; /* For ptrace use. 
*/ 1672: struct task_io_accounting ioac; 1673: #if defined(CONFIG_TASK_XACCT) 1674: u64 acct_rss_mem1; /* accumulated rss usage */ 1675: u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1676: cputime_t acct_timexpd; /* stime + utime since last update */ 1677: #endif 1678: #ifdef CONFIG_CPUSETS 1679: nodemask_t mems_allowed; /* Protected by alloc_lock */ 1680: seqcount_t mems_allowed_seq; /* Seqence no to catch updates */ 1681: int cpuset_mem_spread_rotor; 1682: int cpuset_slab_spread_rotor; 1683: #endif 1684: #ifdef CONFIG_CGROUPS 1685: /* Control Group info protected by css_set_lock */ 1686: struct css_set __rcu *cgroups; 1687: /* cg_list protected by css_set_lock and tsk->alloc_lock */ 1688: struct list_head cg_list; 1689: #endif 1690: #ifdef CONFIG_FUTEX 1691: struct robust_list_head __user *robust_list; 1692: #ifdef CONFIG_COMPAT 1693: struct compat_robust_list_head __user *compat_robust_list; 1694: #endif 1695: struct list_head pi_state_list; 1696: struct futex_pi_state *pi_state_cache; 1697: #endif 1698: #ifdef CONFIG_PERF_EVENTS 1699: struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; 1700: struct mutex perf_event_mutex; 1701: struct list_head perf_event_list; 1702: #endif 1703: #ifdef CONFIG_DEBUG_PREEMPT 1704: unsigned long preempt_disable_ip; 1705: #endif 1706: #ifdef CONFIG_NUMA 1707: struct mempolicy *mempolicy; /* Protected by alloc_lock */ 1708: short il_next; 1709: short pref_node_fork; 1710: #endif 1711: #ifdef CONFIG_NUMA_BALANCING 1712: int numa_scan_seq; 1713: unsigned int numa_scan_period; 1714: unsigned int numa_scan_period_max; 1715: int numa_preferred_nid; 1716: unsigned long numa_migrate_retry; 1717: u64 node_stamp; /* migration stamp */ 1718: u64 last_task_numa_placement; 1719: u64 last_sum_exec_runtime; 1720: struct callback_head numa_work; 1721: 1722: struct list_head numa_entry; 1723: struct numa_group *numa_group; 1724: 1725: /* 1726: * numa_faults is an array split into four regions: 1727: * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 1728: * in this precise order. 1729: * 1730: * faults_memory: Exponential decaying average of faults on a per-node 1731: * basis. Scheduling placement decisions are made based on these 1732: * counts. The values remain static for the duration of a PTE scan. 1733: * faults_cpu: Track the nodes the process was running on when a NUMA 1734: * hinting fault was incurred. 1735: * faults_memory_buffer and faults_cpu_buffer: Record faults per node 1736: * during the current scan window. When the scan completes, the counts 1737: * in faults_memory and faults_cpu decay and these values are copied. 1738: */ 1739: unsigned long *numa_faults; 1740: unsigned long total_numa_faults; 1741: 1742: /* 1743: * numa_faults_locality tracks if faults recorded during the last 1744: * scan window were remote/local or failed to migrate. 
The task scan 1745: * period is adapted based on the locality of the faults with different 1746: * weights depending on whether they were shared or private faults 1747: */ 1748: unsigned long numa_faults_locality[3]; 1749: 1750: unsigned long numa_pages_migrated; 1751: #endif /* CONFIG_NUMA_BALANCING */ 1752: 1753: #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 1754: struct tlbflush_unmap_batch tlb_ubc; 1755: #endif 1756: 1757: struct rcu_head rcu; 1758: 1759: /* 1760: * cache last used pipe for splice 1761: */ 1762: struct pipe_inode_info *splice_pipe; 1763: 1764: struct page_frag task_frag; 1765: 1766: #ifdef CONFIG_TASK_DELAY_ACCT 1767: struct task_delay_info *delays; 1768: #endif 1769: #ifdef CONFIG_FAULT_INJECTION 1770: int make_it_fail; 1771: #endif 1772: /* 1773: * when (nr_dirtied >= nr_dirtied_pause), it's time to call 1774: * balance_dirty_pages() for some dirty throttling pause 1775: */ 1776: int nr_dirtied; 1777: int nr_dirtied_pause; 1778: unsigned long dirty_paused_when; /* start of a write-and-pause period */ 1779: 1780: #ifdef CONFIG_LATENCYTOP 1781: int latency_record_count; 1782: struct latency_record latency_record[LT_SAVECOUNT]; 1783: #endif 1784: /* 1785: * time slack values; these are used to round up poll() and 1786: * select() etc timeout values. These are in nanoseconds. 1787: */ 1788: unsigned long timer_slack_ns; 1789: unsigned long default_timer_slack_ns; 1790: 1791: #ifdef CONFIG_KASAN 1792: unsigned int kasan_depth; 1793: #endif 1794: #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1795: /* Index of current stored address in ret_stack */ 1796: int curr_ret_stack; 1797: /* Stack of return addresses for return function tracing */ 1798: struct ftrace_ret_stack *ret_stack; 1799: /* time stamp for last schedule */ 1800: unsigned long long ftrace_timestamp; 1801: /* 1802: * Number of functions that haven't been traced 1803: * because of depth overrun. 1804: */ 1805: atomic_t trace_overrun; 1806: /* Pause for the tracing */ 1807: atomic_t tracing_graph_pause; 1808: #endif 1809: #ifdef CONFIG_TRACING 1810: /* state flags for use by tracers */ 1811: unsigned long trace; 1812: /* bitmask and counter of trace recursion */ 1813: unsigned long trace_recursion; 1814: #endif /* CONFIG_TRACING */ 1815: #ifdef CONFIG_MEMCG 1816: struct mem_cgroup *memcg_in_oom; 1817: gfp_t memcg_oom_gfp_mask; 1818: int memcg_oom_order; 1819: 1820: /* number of pages to reclaim on returning to userland */ 1821: unsigned int memcg_nr_pages_over_high; 1822: #endif 1823: #ifdef CONFIG_UPROBES 1824: struct uprobe_task *utask; 1825: #endif 1826: #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1827: unsigned int sequential_io; 1828: unsigned int sequential_io_avg; 1829: #endif 1830: #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1831: unsigned long task_state_change; 1832: #endif 1833: int pagefault_disabled; 1834: /* CPU-specific state of this task */ 1835: struct thread_struct thread; 1836: /* 1837: * WARNING: on x86, 'thread_struct' contains a variable-sized 1838: * structure. It *MUST* be at the end of 'task_struct'. 1839: * 1840: * Do not put anything below here! 1841: */ 1842: }; 1843: 1844: #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT 1845: extern int arch_task_struct_size __read_mostly; 1846: #else 1847: # define arch_task_struct_size (sizeof(struct task_struct)) 1848: #endif 1849: 1850: /* Future-safe accessor for struct task_struct's cpus_allowed. 
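 *
 * Callers are expected to use this accessor instead of dereferencing
 * ->cpus_allowed directly, so the field can later be moved or renamed
 * without touching every user.  A minimal, illustrative use (the local
 * cpumask variable is hypothetical):
 *
 *	cpumask_copy(&mask, tsk_cpus_allowed(p));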
*/ 1851: #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1852: 1853: #define TNF_MIGRATED 0x01 1854: #define TNF_NO_GROUP 0x02 1855: #define TNF_SHARED 0x04 1856: #define TNF_FAULT_LOCAL 0x08 1857: #define TNF_MIGRATE_FAIL 0x10 1858: 1859: #ifdef CONFIG_NUMA_BALANCING 1860: extern void task_numa_fault(int last_node, int node, int pages, int flags); 1861: extern pid_t task_numa_group_id(struct task_struct *p); 1862: extern void set_numabalancing_state(bool enabled); 1863: extern void task_numa_free(struct task_struct *p); 1864: extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, 1865: int src_nid, int dst_cpu); 1866: #else 1867: static inline void task_numa_fault(int last_node, int node, int pages, 1868: int flags) 1869: { 1870: } 1871: static inline pid_t task_numa_group_id(struct task_struct *p) 1872: { 1873: return 0; 1874: } 1875: static inline void set_numabalancing_state(bool enabled) 1876: { 1877: } 1878: static inline void task_numa_free(struct task_struct *p) 1879: { 1880: } 1881: static inline bool should_numa_migrate_memory(struct task_struct *p, 1882: struct page *page, int src_nid, int dst_cpu) 1883: { 1884: return true; 1885: } 1886: #endif 1887: 1888: static inline struct pid *task_pid(struct task_struct *task) 1889: { 1890: return task->pids[PIDTYPE_PID].pid; 1891: } 1892: 1893: static inline struct pid *task_tgid(struct task_struct *task) 1894: { 1895: return task->group_leader->pids[PIDTYPE_PID].pid; 1896: } 1897: 1898: /* 1899: * Without tasklist or rcu lock it is not safe to dereference 1900: * the result of task_pgrp/task_session even if task == current, 1901: * we can race with another thread doing sys_setsid/sys_setpgid. 1902: */ 1903: static inline struct pid *task_pgrp(struct task_struct *task) 1904: { 1905: return task->group_leader->pids[PIDTYPE_PGID].pid; 1906: } 1907: 1908: static inline struct pid *task_session(struct task_struct *task) 1909: { 1910: return task->group_leader->pids[PIDTYPE_SID].pid; 1911: } 1912: 1913: struct pid_namespace; 1914: 1915: /* 1916: * the helpers to get the task's different pids as they are seen 1917: * from various namespaces 1918: * 1919: * task_xid_nr() : global id, i.e. the id seen from the init namespace; 1920: * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 1921: * current. 
1922: * task_xid_nr_ns() : id seen from the ns specified; 1923: * 1924: * set_task_vxid() : assigns a virtual id to a task; 1925: * 1926: * see also pid_nr() etc in include/linux/pid.h 1927: */ 1928: pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, 1929: struct pid_namespace *ns); 1930: 1931: static inline pid_t task_pid_nr(struct task_struct *tsk) 1932: { 1933: return tsk->pid; 1934: } 1935: 1936: static inline pid_t task_pid_nr_ns(struct task_struct *tsk, 1937: struct pid_namespace *ns) 1938: { 1939: return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 1940: } 1941: 1942: static inline pid_t task_pid_vnr(struct task_struct *tsk) 1943: { 1944: return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 1945: } 1946: 1947: 1948: static inline pid_t task_tgid_nr(struct task_struct *tsk) 1949: { 1950: return tsk->tgid; 1951: } 1952: 1953: 1954: static inline int pid_alive(const struct task_struct *p); 1955: 1956: static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, 1957: struct pid_namespace *ns) 1958: { 1959: return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 1960: } 1961: 1962: static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1963: { 1964: return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 1965: } 1966: 1967: 1968: static inline pid_t task_session_nr_ns(struct task_struct *tsk, 1969: struct pid_namespace *ns) 1970: { 1971: return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 1972: } 1973: 1974: static inline pid_t task_session_vnr(struct task_struct *tsk) 1975: { 1976: return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 1977: } 1978: 1979: static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1980: { 1981: return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns); 1982: } 1983: 1984: static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1985: { 1986: return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); 1987: } 1988: 1989: static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1990: { 1991: pid_t pid = 0; 1992: 1993: rcu_read_lock(); 1994: if (pid_alive(tsk)) 1995: pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1996: rcu_read_unlock(); 1997: 1998: return pid; 1999: } 2000: 2001: static inline pid_t task_ppid_nr(const struct task_struct *tsk) 2002: { 2003: return task_ppid_nr_ns(tsk, &init_pid_ns); 2004: } 2005: 2006: /* obsolete, do not use */ 2007: static inline pid_t task_pgrp_nr(struct task_struct *tsk) 2008: { 2009: return task_pgrp_nr_ns(tsk, &init_pid_ns); 2010: } 2011: 2012: /** 2013: * pid_alive - check that a task structure is not stale 2014: * @p: Task structure to be checked. 2015: * 2016: * Test if a process is not yet dead (at most zombie state) 2017: * If pid_alive fails, then pointers within the task structure 2018: * can be stale and must not be dereferenced. 2019: * 2020: * Return: 1 if the process is alive. 0 otherwise. 2021: */ 2022: static inline int pid_alive(const struct task_struct *p) 2023: { 2024: return p->pids[PIDTYPE_PID].pid != NULL; 2025: } 2026: 2027: /** 2028: * is_global_init - check if a task structure is init. Since init 2029: * is free to have sub-threads we need to check tgid. 2030: * @tsk: Task structure to be checked. 2031: * 2032: * Check if a task structure is the first user space task the kernel created. 2033: * 2034: * Return: 1 if the task structure is init. 0 otherwise. 
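 *
 * Illustrative use only: code that must never act on the init process
 * typically bails out early, e.g.
 *
 *	if (is_global_init(p))
 *		return;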
2035: */ 2036: static inline int is_global_init(struct task_struct *tsk) 2037: { 2038: return task_tgid_nr(tsk) == 1; 2039: } 2040: 2041: extern struct pid *cad_pid; 2042: 2043: extern void free_task(struct task_struct *tsk); 2044: #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) 2045: 2046: extern void __put_task_struct(struct task_struct *t); 2047: 2048: static inline void put_task_struct(struct task_struct *t) 2049: { 2050: if (atomic_dec_and_test(&t->usage)) 2051: __put_task_struct(t); 2052: } 2053: 2054: #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2055: extern void task_cputime(struct task_struct *t, 2056: cputime_t *utime, cputime_t *stime); 2057: extern void task_cputime_scaled(struct task_struct *t, 2058: cputime_t *utimescaled, cputime_t *stimescaled); 2059: extern cputime_t task_gtime(struct task_struct *t); 2060: #else 2061: static inline void task_cputime(struct task_struct *t, 2062: cputime_t *utime, cputime_t *stime) 2063: { 2064: if (utime) 2065: *utime = t->utime; 2066: if (stime) 2067: *stime = t->stime; 2068: } 2069: 2070: static inline void task_cputime_scaled(struct task_struct *t, 2071: cputime_t *utimescaled, 2072: cputime_t *stimescaled) 2073: { 2074: if (utimescaled) 2075: *utimescaled = t->utimescaled; 2076: if (stimescaled) 2077: *stimescaled = t->stimescaled; 2078: } 2079: 2080: static inline cputime_t task_gtime(struct task_struct *t) 2081: { 2082: return t->gtime; 2083: } 2084: #endif 2085: extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2086: extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2087: 2088: /* 2089: * Per process flags 2090: */ 2091: #define PF_EXITING 0x00000004 /* getting shut down */ 2092: #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 2093: #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 2094: #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 2095: #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 2096: #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ 2097: #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 2098: #define PF_DUMPCORE 0x00000200 /* dumped core */ 2099: #define PF_SIGNALED 0x00000400 /* killed by a signal */ 2100: #define PF_MEMALLOC 0x00000800 /* Allocating memory */ 2101: #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ 2102: #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 2103: #define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ 2104: #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 2105: #define PF_FROZEN 0x00010000 /* frozen for system suspend */ 2106: #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 2107: #define PF_KSWAPD 0x00040000 /* I am kswapd */ 2108: #define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ 2109: #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 2110: #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 2111: #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 2112: #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 2113: #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 2114: #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 2115: #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 2116: #define 
PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 2117: #define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ 2118: 2119: /* 2120: * Only the _current_ task can read/write to tsk->flags, but other 2121: * tasks can access tsk->flags in readonly mode for example 2122: * with tsk_used_math (like during threaded core dumping). 2123: * There is however an exception to this rule during ptrace 2124: * or during fork: the ptracer task is allowed to write to the 2125: * child->flags of its traced child (same goes for fork, the parent 2126: * can write to the child->flags), because we're guaranteed the 2127: * child is not running and in turn not changing child->flags 2128: * at the same time the parent does it. 2129: */ 2130: #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 2131: #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 2132: #define clear_used_math() clear_stopped_child_used_math(current) 2133: #define set_used_math() set_stopped_child_used_math(current) 2134: #define conditional_stopped_child_used_math(condition, child) \ 2135: do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 2136: #define conditional_used_math(condition) \ 2137: conditional_stopped_child_used_math(condition, current) 2138: #define copy_to_stopped_child_used_math(child) \ 2139: do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 2140: /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 2141: #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 2142: #define used_math() tsk_used_math(current) 2143: 2144: /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags 2145: * __GFP_FS is also cleared as it implies __GFP_IO. 2146: */ 2147: static inline gfp_t memalloc_noio_flags(gfp_t flags) 2148: { 2149: if (unlikely(current->flags & PF_MEMALLOC_NOIO)) 2150: flags &= ~(__GFP_IO | __GFP_FS); 2151: return flags; 2152: } 2153: 2154: static inline unsigned int memalloc_noio_save(void) 2155: { 2156: unsigned int flags = current->flags & PF_MEMALLOC_NOIO; 2157: current->flags |= PF_MEMALLOC_NOIO; 2158: return flags; 2159: } 2160: 2161: static inline void memalloc_noio_restore(unsigned int flags) 2162: { 2163: current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; 2164: } 2165: 2166: /* Per-process atomic flags. */ 2167: #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ 2168: #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 2169: #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 2170: #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 2171: #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 2172: 2173: #define TASK_PFA_TEST(name, func) \ 2174: static inline bool task_##func(struct task_struct *p) \ 2175: { return test_bit(PFA_##name, &p->atomic_flags); } 2176: #define TASK_PFA_SET(name, func) \ 2177: static inline void task_set_##func(struct task_struct *p) \ 2178: { set_bit(PFA_##name, &p->atomic_flags); } 2179: #define TASK_PFA_CLEAR(name, func) \ 2180: static inline void task_clear_##func(struct task_struct *p) \ 2181: { clear_bit(PFA_##name, &p->atomic_flags); } 2182: 2183: TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 2184: TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 2185: 2186: TASK_PFA_TEST(SPREAD_PAGE, spread_page) 2187: TASK_PFA_SET(SPREAD_PAGE, spread_page) 2188: TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 2189: 2190: TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 2191: TASK_PFA_SET(SPREAD_SLAB, spread_slab) 2192: TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 2193: 2194: TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 2195: TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 2196: TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 2197: 2198: TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 2199: TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 2200: 2201: /* 2202: * task->jobctl flags 2203: */ 2204: #define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ 2205: 2206: #define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ 2207: #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ 2208: #define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ 2209: #define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ 2210: #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ 2211: #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 2212: #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 2213: 2214: #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) 2215: #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) 2216: #define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) 2217: #define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) 2218: #define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) 2219: #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) 2220: #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) 2221: 2222: #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 2223: #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 2224: 2225: extern bool task_set_jobctl_pending(struct task_struct *task, 2226: unsigned long mask); 2227: extern void task_clear_jobctl_trapping(struct task_struct *task); 2228: extern void task_clear_jobctl_pending(struct task_struct *task, 2229: unsigned long mask); 2230: 2231: static inline void rcu_copy_process(struct task_struct *p) 2232: { 2233: #ifdef CONFIG_PREEMPT_RCU 2234: p->rcu_read_lock_nesting = 0; 2235: p->rcu_read_unlock_special.s = 0; 2236: p->rcu_blocked_node = NULL; 2237: INIT_LIST_HEAD(&p->rcu_node_entry); 2238: #endif /* #ifdef CONFIG_PREEMPT_RCU */ 2239: #ifdef CONFIG_TASKS_RCU 2240: p->rcu_tasks_holdout = false; 2241: INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 2242: p->rcu_tasks_idle_cpu = -1; 2243: #endif /* #ifdef CONFIG_TASKS_RCU */ 2244: } 2245: 2246: static inline void tsk_restore_flags(struct 
task_struct *task, 2247: unsigned long orig_flags, unsigned long flags) 2248: { 2249: task->flags &= ~flags; 2250: task->flags |= orig_flags & flags; 2251: } 2252: 2253: extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, 2254: const struct cpumask *trial); 2255: extern int task_can_attach(struct task_struct *p, 2256: const struct cpumask *cs_cpus_allowed); 2257: #ifdef CONFIG_SMP 2258: extern void do_set_cpus_allowed(struct task_struct *p, 2259: const struct cpumask *new_mask); 2260: 2261: extern int set_cpus_allowed_ptr(struct task_struct *p, 2262: const struct cpumask *new_mask); 2263: #else 2264: static inline void do_set_cpus_allowed(struct task_struct *p, 2265: const struct cpumask *new_mask) 2266: { 2267: } 2268: static inline int set_cpus_allowed_ptr(struct task_struct *p, 2269: const struct cpumask *new_mask) 2270: { 2271: if (!cpumask_test_cpu(0, new_mask)) 2272: return -EINVAL; 2273: return 0; 2274: } 2275: #endif 2276: 2277: #ifdef CONFIG_NO_HZ_COMMON 2278: void calc_load_enter_idle(void); 2279: void calc_load_exit_idle(void); 2280: #else 2281: static inline void calc_load_enter_idle(void) { } 2282: static inline void calc_load_exit_idle(void) { } 2283: #endif /* CONFIG_NO_HZ_COMMON */ 2284: 2285: /* 2286: * Do not use outside of architecture code which knows its limitations. 2287: * 2288: * sched_clock() has no promise of monotonicity or bounded drift between 2289: * CPUs, use (which you should not) requires disabling IRQs. 2290: * 2291: * Please use one of the three interfaces below. 2292: */ 2293: extern unsigned long long notrace sched_clock(void); 2294: /* 2295: * See the comment in kernel/sched/clock.c 2296: */ 2297: extern u64 cpu_clock(int cpu); 2298: extern u64 local_clock(void); 2299: extern u64 running_clock(void); 2300: extern u64 sched_clock_cpu(int cpu); 2301: 2302: 2303: extern void sched_clock_init(void); 2304: 2305: #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2306: static inline void sched_clock_tick(void) 2307: { 2308: } 2309: 2310: static inline void sched_clock_idle_sleep_event(void) 2311: { 2312: } 2313: 2314: static inline void sched_clock_idle_wakeup_event(u64 delta_ns) 2315: { 2316: } 2317: #else 2318: /* 2319: * Architectures can set this to 1 if they have specified 2320: * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 2321: * but then during bootup it turns out that sched_clock() 2322: * is reliable after all: 2323: */ 2324: extern int sched_clock_stable(void); 2325: extern void set_sched_clock_stable(void); 2326: extern void clear_sched_clock_stable(void); 2327: 2328: extern void sched_clock_tick(void); 2329: extern void sched_clock_idle_sleep_event(void); 2330: extern void sched_clock_idle_wakeup_event(u64 delta_ns); 2331: #endif 2332: 2333: #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2334: /* 2335: * An i/f to runtime opt-in for irq time accounting based off of sched_clock. 2336: * The reason for this explicit opt-in is not to have perf penalty with 2337: * slow sched_clocks. 
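 *
 * Illustrative only: an architecture whose sched_clock() is known to be
 * cheap would typically opt in once while setting up its clock, e.g.
 *
 *	enable_sched_clock_irqtime();
 *
 * and may revoke that with disable_sched_clock_irqtime() if the clock
 * later turns out to be unsuitable.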
2338: */ 2339: extern void enable_sched_clock_irqtime(void); 2340: extern void disable_sched_clock_irqtime(void); 2341: #else 2342: static inline void enable_sched_clock_irqtime(void) {} 2343: static inline void disable_sched_clock_irqtime(void) {} 2344: #endif 2345: 2346: extern unsigned long long 2347: task_sched_runtime(struct task_struct *task); 2348: 2349: /* sched_exec is called by processes performing an exec */ 2350: #ifdef CONFIG_SMP 2351: extern void sched_exec(void); 2352: #else 2353: #define sched_exec() {} 2354: #endif 2355: 2356: extern void sched_clock_idle_sleep_event(void); 2357: extern void sched_clock_idle_wakeup_event(u64 delta_ns); 2358: 2359: #ifdef CONFIG_HOTPLUG_CPU 2360: extern void idle_task_exit(void); 2361: #else 2362: static inline void idle_task_exit(void) {} 2363: #endif 2364: 2365: #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) 2366: extern void wake_up_nohz_cpu(int cpu); 2367: #else 2368: static inline void wake_up_nohz_cpu(int cpu) { } 2369: #endif 2370: 2371: #ifdef CONFIG_NO_HZ_FULL 2372: extern bool sched_can_stop_tick(void); 2373: extern u64 scheduler_tick_max_deferment(void); 2374: #else 2375: static inline bool sched_can_stop_tick(void) { return false; } 2376: #endif 2377: 2378: #ifdef CONFIG_SCHED_AUTOGROUP 2379: extern void sched_autogroup_create_attach(struct task_struct *p); 2380: extern void sched_autogroup_detach(struct task_struct *p); 2381: extern void sched_autogroup_fork(struct signal_struct *sig); 2382: extern void sched_autogroup_exit(struct signal_struct *sig); 2383: #ifdef CONFIG_PROC_FS 2384: extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2385: extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); 2386: #endif 2387: #else 2388: static inline void sched_autogroup_create_attach(struct task_struct *p) { } 2389: static inline void sched_autogroup_detach(struct task_struct *p) { } 2390: static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2391: static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2392: #endif 2393: 2394: extern int yield_to(struct task_struct *p, bool preempt); 2395: extern void set_user_nice(struct task_struct *p, long nice); 2396: extern int task_prio(const struct task_struct *p); 2397: /** 2398: * task_nice - return the nice value of a given task. 2399: * @p: the task in question. 2400: * 2401: * Return: The nice value [ -20 ... 0 ... 19 ]. 2402: */ 2403: static inline int task_nice(const struct task_struct *p) 2404: { 2405: return PRIO_TO_NICE((p)->static_prio); 2406: } 2407: extern int can_nice(const struct task_struct *p, const int nice); 2408: extern int task_curr(const struct task_struct *p); 2409: extern int idle_cpu(int cpu); 2410: extern int sched_setscheduler(struct task_struct *, int, 2411: const struct sched_param *); 2412: extern int sched_setscheduler_nocheck(struct task_struct *, int, 2413: const struct sched_param *); 2414: extern int sched_setattr(struct task_struct *, 2415: const struct sched_attr *); 2416: extern struct task_struct *idle_task(int cpu); 2417: /** 2418: * is_idle_task - is the specified task an idle task? 2419: * @p: the task in question. 2420: * 2421: * Return: 1 if @p is an idle task. 0 otherwise. 
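 *
 * Illustrative use only: accounting paths often special-case the idle
 * task, e.g. (delta is a hypothetical cputime_t)
 *
 *	if (is_idle_task(current))
 *		account_idle_time(delta);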
2422: */
2423: static inline bool is_idle_task(const struct task_struct *p)
2424: {
2425: return p->pid == 0;
2426: }
2427: extern struct task_struct *curr_task(int cpu);
2428: extern void set_curr_task(int cpu, struct task_struct *p);
2429:
2430: void yield(void);
2431:
2432: union thread_union {
2433: struct thread_info thread_info;
2434: unsigned long stack[THREAD_SIZE/sizeof(long)];
2435: };
2436:
2437: #ifndef __HAVE_ARCH_KSTACK_END
2438: static inline int kstack_end(void *addr)
2439: {
2440: /* Reliable end of stack detection:
2441: * Some APM bios versions misalign the stack
2442: */
2443: return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2444: }
2445: #endif
2446:
2447: extern union thread_union init_thread_union;
2448: extern struct task_struct init_task;
2449:
2450: extern struct mm_struct init_mm;
2451:
2452: extern struct pid_namespace init_pid_ns;
2453:
2454: /*
2455: * find a task by one of its numerical ids
2456: *
2457: * find_task_by_pid_ns():
2458: * finds a task by its pid in the specified namespace
2459: * find_task_by_vpid():
2460: * finds a task by its virtual pid
2461: *
2462: * see also find_vpid() etc in include/linux/pid.h
2463: */
2464:
2465: extern struct task_struct *find_task_by_vpid(pid_t nr);
2466: extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2467: struct pid_namespace *ns);
2468:
2469: /* per-UID process charging. */
2470: extern struct user_struct * alloc_uid(kuid_t);
2471: static inline struct user_struct *get_uid(struct user_struct *u)
2472: {
2473: atomic_inc(&u->__count);
2474: return u;
2475: }
2476: extern void free_uid(struct user_struct *);
2477:
2478: #include <asm/current.h>
2479:
2480: extern void xtime_update(unsigned long ticks);
2481:
2482: extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2483: extern int wake_up_process(struct task_struct *tsk);
2484: extern void wake_up_new_task(struct task_struct *tsk);
2485: #ifdef CONFIG_SMP
2486: extern void kick_process(struct task_struct *tsk);
2487: #else
2488: static inline void kick_process(struct task_struct *tsk) { }
2489: #endif
2490: extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2491: extern void sched_dead(struct task_struct *p);
2492:
2493: extern void proc_caches_init(void);
2494: extern void flush_signals(struct task_struct *);
2495: extern void ignore_signals(struct task_struct *);
2496: extern void flush_signal_handlers(struct task_struct *, int force_default);
2497: extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2498:
2499: static inline int kernel_dequeue_signal(siginfo_t *info)
2500: {
2501: struct task_struct *tsk = current;
2502: siginfo_t __info;
2503: int ret;
2504:
2505: spin_lock_irq(&tsk->sighand->siglock);
2506: ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2507: spin_unlock_irq(&tsk->sighand->siglock);
2508:
2509: return ret;
2510: }
2511:
2512: static inline void kernel_signal_stop(void)
2513: {
2514: spin_lock_irq(&current->sighand->siglock);
2515: if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2516: __set_current_state(TASK_STOPPED);
2517: spin_unlock_irq(&current->sighand->siglock);
2518:
2519: schedule();
2520: }
2521:
2522: extern void release_task(struct task_struct * p);
2523: extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2524: extern int force_sigsegv(int, struct task_struct *);
2525: extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2526: extern int __kill_pgrp_info(int sig, struct siginfo *info,
struct pid *pgrp);
2527: extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2528: extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2529: const struct cred *, u32);
2530: extern int kill_pgrp(struct pid *pid, int sig, int priv);
2531: extern int kill_pid(struct pid *pid, int sig, int priv);
2532: extern int kill_proc_info(int, struct siginfo *, pid_t);
2533: extern __must_check bool do_notify_parent(struct task_struct *, int);
2534: extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2535: extern void force_sig(int, struct task_struct *);
2536: extern int send_sig(int, struct task_struct *, int);
2537: extern int zap_other_threads(struct task_struct *p);
2538: extern struct sigqueue *sigqueue_alloc(void);
2539: extern void sigqueue_free(struct sigqueue *);
2540: extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2541: extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2542:
2543: static inline void restore_saved_sigmask(void)
2544: {
2545: if (test_and_clear_restore_sigmask())
2546: __set_current_blocked(&current->saved_sigmask);
2547: }
2548:
2549: static inline sigset_t *sigmask_to_save(void)
2550: {
2551: sigset_t *res = &current->blocked;
2552: if (unlikely(test_restore_sigmask()))
2553: res = &current->saved_sigmask;
2554: return res;
2555: }
2556:
2557: static inline int kill_cad_pid(int sig, int priv)
2558: {
2559: return kill_pid(cad_pid, sig, priv);
2560: }
2561:
2562: /* These can be the second arg to send_sig_info/send_group_sig_info. */
2563: #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2564: #define SEND_SIG_PRIV ((struct siginfo *) 1)
2565: #define SEND_SIG_FORCED ((struct siginfo *) 2)
2566:
2567: /*
2568: * True if we are on the alternate signal stack.
2569: */
2570: static inline int on_sig_stack(unsigned long sp)
2571: {
2572: #ifdef CONFIG_STACK_GROWSUP
2573: return sp >= current->sas_ss_sp &&
2574: sp - current->sas_ss_sp < current->sas_ss_size;
2575: #else
2576: return sp > current->sas_ss_sp &&
2577: sp - current->sas_ss_sp <= current->sas_ss_size;
2578: #endif
2579: }
2580:
2581: static inline int sas_ss_flags(unsigned long sp)
2582: {
2583: if (!current->sas_ss_size)
2584: return SS_DISABLE;
2585:
2586: return on_sig_stack(sp) ? SS_ONSTACK : 0;
2587: }
2588:
2589: static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2590: {
2591: if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2592: #ifdef CONFIG_STACK_GROWSUP
2593: return current->sas_ss_sp;
2594: #else
2595: return current->sas_ss_sp + current->sas_ss_size;
2596: #endif
2597: return sp;
2598: }
2599:
2600: /*
2601: * Routines for handling mm_structs
2602: */
2603: extern struct mm_struct * mm_alloc(void);
2604:
2605: /* mmdrop drops the mm and the page tables */
2606: extern void __mmdrop(struct mm_struct *);
2607: static inline void mmdrop(struct mm_struct * mm)
2608: {
2609: if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2610: __mmdrop(mm);
2611: }
2612:
2613: /* mmput gets rid of the mappings and all user-space */
2614: extern void mmput(struct mm_struct *);
2615: /* Grab a reference to a task's mm, if it is not already going away */
2616: extern struct mm_struct *get_task_mm(struct task_struct *task);
2617: /*
2618: * Grab a reference to a task's mm, if it is not already going away
2619: * and ptrace_may_access with the mode parameter passed to it
2620: * succeeds.
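 *
 * Illustrative calling pattern (the mode constant and error handling
 * shown here are only an example):
 *
 *	mm = mm_access(task, PTRACE_MODE_READ);
 *	if (IS_ERR_OR_NULL(mm))
 *		return mm ? PTR_ERR(mm) : -ESRCH;
 *	... inspect the mm ...
 *	mmput(mm);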
2621: */ 2622: extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2623: /* Remove the current tasks stale references to the old mm_struct */ 2624: extern void mm_release(struct task_struct *, struct mm_struct *); 2625: 2626: #ifdef CONFIG_HAVE_COPY_THREAD_TLS 2627: extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, 2628: struct task_struct *, unsigned long); 2629: #else 2630: extern int copy_thread(unsigned long, unsigned long, unsigned long, 2631: struct task_struct *); 2632: 2633: /* Architectures that haven't opted into copy_thread_tls get the tls argument 2634: * via pt_regs, so ignore the tls argument passed via C. */ 2635: static inline int copy_thread_tls( 2636: unsigned long clone_flags, unsigned long sp, unsigned long arg, 2637: struct task_struct *p, unsigned long tls) 2638: { 2639: return copy_thread(clone_flags, sp, arg, p); 2640: } 2641: #endif 2642: extern void flush_thread(void); 2643: extern void exit_thread(void); 2644: 2645: extern void exit_files(struct task_struct *); 2646: extern void __cleanup_sighand(struct sighand_struct *); 2647: 2648: extern void exit_itimers(struct signal_struct *); 2649: extern void flush_itimer_signals(void); 2650: 2651: extern void do_group_exit(int); 2652: 2653: extern int do_execve(struct filename *, 2654: const char __user * const __user *, 2655: const char __user * const __user *); 2656: extern int do_execveat(int, struct filename *, 2657: const char __user * const __user *, 2658: const char __user * const __user *, 2659: int); 2660: extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); 2661: extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2662: struct task_struct *fork_idle(int); 2663: extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2664: 2665: extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 2666: static inline void set_task_comm(struct task_struct *tsk, const char *from) 2667: { 2668: __set_task_comm(tsk, from, false); 2669: } 2670: extern char *get_task_comm(char *to, struct task_struct *tsk); 2671: 2672: #ifdef CONFIG_SMP 2673: void scheduler_ipi(void); 2674: extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2675: #else 2676: static inline void scheduler_ipi(void) { } 2677: static inline unsigned long wait_task_inactive(struct task_struct *p, 2678: long match_state) 2679: { 2680: return 1; 2681: } 2682: #endif 2683: 2684: #define tasklist_empty() \ 2685: list_empty(&init_task.tasks) 2686: 2687: #define next_task(p) \ 2688: list_entry_rcu((p)->tasks.next, struct task_struct, tasks) 2689: 2690: #define for_each_process(p) \ 2691: for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2692: 2693: extern bool current_is_single_threaded(void); 2694: 2695: /* 2696: * Careful: do_each_thread/while_each_thread is a double loop so 2697: * 'break' will not work as expected - use goto instead. 2698: */ 2699: #define do_each_thread(g, t) \ 2700: for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 2701: 2702: #define while_each_thread(g, t) \ 2703: while ((t = next_thread(t)) != g) 2704: 2705: #define __for_each_thread(signal, t) \ 2706: list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) 2707: 2708: #define for_each_thread(p, t) \ 2709: __for_each_thread((p)->signal, t) 2710: 2711: /* Careful: this is a double loop, 'break' won't work as expected. 
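 * Use goto rather than break to leave the walk early, and hold RCU (or
 * tasklist_lock) around it.  A minimal, illustrative walk, where
 * matches() stands in for the caller's own test:
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t) {
 *		if (matches(t))
 *			goto found;
 *	}
 * found:
 *	rcu_read_unlock();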
*/ 2712: #define for_each_process_thread(p, t) \ 2713: for_each_process(p) for_each_thread(p, t) 2714: 2715: static inline int get_nr_threads(struct task_struct *tsk) 2716: { 2717: return tsk->signal->nr_threads; 2718: } 2719: 2720: static inline bool thread_group_leader(struct task_struct *p) 2721: { 2722: return p->exit_signal >= 0; 2723: } 2724: 2725: /* Do to the insanities of de_thread it is possible for a process 2726: * to have the pid of the thread group leader without actually being 2727: * the thread group leader. For iteration through the pids in proc 2728: * all we care about is that we have a task with the appropriate 2729: * pid, we don't actually care if we have the right task. 2730: */ 2731: static inline bool has_group_leader_pid(struct task_struct *p) 2732: { 2733: return task_pid(p) == p->signal->leader_pid; 2734: } 2735: 2736: static inline 2737: bool same_thread_group(struct task_struct *p1, struct task_struct *p2) 2738: { 2739: return p1->signal == p2->signal; 2740: } 2741: 2742: static inline struct task_struct *next_thread(const struct task_struct *p) 2743: { 2744: return list_entry_rcu(p->thread_group.next, 2745: struct task_struct, thread_group); 2746: } 2747: 2748: static inline int thread_group_empty(struct task_struct *p) 2749: { 2750: return list_empty(&p->thread_group); 2751: } 2752: 2753: #define delay_group_leader(p) \ 2754: (thread_group_leader(p) && !thread_group_empty(p)) 2755: 2756: /* 2757: * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2758: * subscriptions and synchronises with wait4(). Also used in procfs. Also 2759: * pins the final release of task.io_context. Also protects ->cpuset and 2760: * ->cgroup.subsys[]. And ->vfork_done. 2761: * 2762: * Nests both inside and outside of read_lock(&tasklist_lock). 2763: * It must not be nested with write_lock_irq(&tasklist_lock), 2764: * neither inside nor outside. 2765: */ 2766: static inline void task_lock(struct task_struct *p) 2767: { 2768: spin_lock(&p->alloc_lock); 2769: } 2770: 2771: static inline void task_unlock(struct task_struct *p) 2772: { 2773: spin_unlock(&p->alloc_lock); 2774: } 2775: 2776: extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 2777: unsigned long *flags); 2778: 2779: static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, 2780: unsigned long *flags) 2781: { 2782: struct sighand_struct *ret; 2783: 2784: ret = __lock_task_sighand(tsk, flags); 2785: (void)__cond_lock(&tsk->sighand->siglock, ret); 2786: return ret; 2787: } 2788: 2789: static inline void unlock_task_sighand(struct task_struct *tsk, 2790: unsigned long *flags) 2791: { 2792: spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2793: } 2794: 2795: /** 2796: * threadgroup_change_begin - mark the beginning of changes to a threadgroup 2797: * @tsk: task causing the changes 2798: * 2799: * All operations which modify a threadgroup - a new thread joining the 2800: * group, death of a member thread (the assertion of PF_EXITING) and 2801: * exec(2) dethreading the process and replacing the leader - are wrapped 2802: * by threadgroup_change_{begin|end}(). This is to provide a place which 2803: * subsystems needing threadgroup stability can hook into for 2804: * synchronization. 
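 *
 * A minimal, illustrative pairing (what happens in between depends
 * entirely on the caller):
 *
 *	threadgroup_change_begin(tsk);
 *	... create/exit a thread or change the group leader ...
 *	threadgroup_change_end(tsk);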
2805: */ 2806: static inline void threadgroup_change_begin(struct task_struct *tsk) 2807: { 2808: might_sleep(); 2809: cgroup_threadgroup_change_begin(tsk); 2810: } 2811: 2812: /** 2813: * threadgroup_change_end - mark the end of changes to a threadgroup 2814: * @tsk: task causing the changes 2815: * 2816: * See threadgroup_change_begin(). 2817: */ 2818: static inline void threadgroup_change_end(struct task_struct *tsk) 2819: { 2820: cgroup_threadgroup_change_end(tsk); 2821: } 2822: 2823: #ifndef __HAVE_THREAD_FUNCTIONS 2824: 2825: #define task_thread_info(task) ((struct thread_info *)(task)->stack) 2826: #define task_stack_page(task) ((task)->stack) 2827: 2828: static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2829: { 2830: *task_thread_info(p) = *task_thread_info(org); 2831: task_thread_info(p)->task = p; 2832: } 2833: 2834: /* 2835: * Return the address of the last usable long on the stack. 2836: * 2837: * When the stack grows down, this is just above the thread 2838: * info struct. Going any lower will corrupt the threadinfo. 2839: * 2840: * When the stack grows up, this is the highest address. 2841: * Beyond that position, we corrupt data on the next page. 2842: */ 2843: static inline unsigned long *end_of_stack(struct task_struct *p) 2844: { 2845: #ifdef CONFIG_STACK_GROWSUP 2846: return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; 2847: #else 2848: return (unsigned long *)(task_thread_info(p) + 1); 2849: #endif 2850: } 2851: 2852: #endif 2853: #define task_stack_end_corrupted(task) \ 2854: (*(end_of_stack(task)) != STACK_END_MAGIC) 2855: 2856: static inline int object_is_on_stack(void *obj) 2857: { 2858: void *stack = task_stack_page(current); 2859: 2860: return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2861: } 2862: 2863: extern void thread_info_cache_init(void); 2864: 2865: #ifdef CONFIG_DEBUG_STACK_USAGE 2866: static inline unsigned long stack_not_used(struct task_struct *p) 2867: { 2868: unsigned long *n = end_of_stack(p); 2869: 2870: do { /* Skip over canary */ 2871: n++; 2872: } while (!*n); 2873: 2874: return (unsigned long)n - (unsigned long)end_of_stack(p); 2875: } 2876: #endif 2877: extern void set_task_stack_end_magic(struct task_struct *tsk); 2878: 2879: /* set thread flags in other task's structures 2880: * - see asm/thread_info.h for TIF_xxxx flags available 2881: */ 2882: static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2883: { 2884: set_ti_thread_flag(task_thread_info(tsk), flag); 2885: } 2886: 2887: static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2888: { 2889: clear_ti_thread_flag(task_thread_info(tsk), flag); 2890: } 2891: 2892: static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2893: { 2894: return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2895: } 2896: 2897: static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2898: { 2899: return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2900: } 2901: 2902: static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2903: { 2904: return test_ti_thread_flag(task_thread_info(tsk), flag); 2905: } 2906: 2907: static inline void set_tsk_need_resched(struct task_struct *tsk) 2908: { 2909: set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2910: } 2911: 2912: static inline void clear_tsk_need_resched(struct task_struct *tsk) 2913: { 2914: clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2915: } 2916: 2917: static 
inline int test_tsk_need_resched(struct task_struct *tsk) 2918: { 2919: return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2920: } 2921: 2922: static inline int restart_syscall(void) 2923: { 2924: set_tsk_thread_flag(current, TIF_SIGPENDING); 2925: return -ERESTARTNOINTR; 2926: } 2927: 2928: static inline int signal_pending(struct task_struct *p) 2929: { 2930: return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2931: } 2932: 2933: static inline int __fatal_signal_pending(struct task_struct *p) 2934: { 2935: return unlikely(sigismember(&p->pending.signal, SIGKILL)); 2936: } 2937: 2938: static inline int fatal_signal_pending(struct task_struct *p) 2939: { 2940: return signal_pending(p) && __fatal_signal_pending(p); 2941: } 2942: 2943: static inline int signal_pending_state(long state, struct task_struct *p) 2944: { 2945: if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) 2946: return 0; 2947: if (!signal_pending(p)) 2948: return 0; 2949: 2950: return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2951: } 2952: 2953: /* 2954: * cond_resched() and cond_resched_lock(): latency reduction via 2955: * explicit rescheduling in places that are safe. The return 2956: * value indicates whether a reschedule was done in fact. 2957: * cond_resched_lock() will drop the spinlock before scheduling, 2958: * cond_resched_softirq() will enable bhs before scheduling. 2959: */ 2960: extern int _cond_resched(void); 2961: 2962: #define cond_resched() ({ \ 2963: ___might_sleep(__FILE__, __LINE__, 0); \ 2964: _cond_resched(); \ 2965: }) 2966: 2967: extern int __cond_resched_lock(spinlock_t *lock); 2968: 2969: #define cond_resched_lock(lock) ({ \ 2970: ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 2971: __cond_resched_lock(lock); \ 2972: }) 2973: 2974: extern int __cond_resched_softirq(void); 2975: 2976: #define cond_resched_softirq() ({ \ 2977: ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2978: __cond_resched_softirq(); \ 2979: }) 2980: 2981: static inline void cond_resched_rcu(void) 2982: { 2983: #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 2984: rcu_read_unlock(); 2985: cond_resched(); 2986: rcu_read_lock(); 2987: #endif 2988: } 2989: 2990: /* 2991: * Does a critical section need to be broken due to another 2992: * task waiting?: (technically does not depend on CONFIG_PREEMPT, 2993: * but a general need for low latency) 2994: */ 2995: static inline int spin_needbreak(spinlock_t *lock) 2996: { 2997: #ifdef CONFIG_PREEMPT 2998: return spin_is_contended(lock); 2999: #else 3000: return 0; 3001: #endif 3002: } 3003: 3004: /* 3005: * Idle thread specific functions to determine the need_resched 3006: * polling state. 
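 * Roughly, a polling idle loop pairs them like this (simplified sketch,
 * not the actual generic idle loop):
 *
 *	__current_set_polling();
 *	while (!need_resched())
 *		cpu_relax();
 *	__current_clr_polling();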
3007: */ 3008: #ifdef TIF_POLLING_NRFLAG 3009: static inline int tsk_is_polling(struct task_struct *p) 3010: { 3011: return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); 3012: } 3013: 3014: static inline void __current_set_polling(void) 3015: { 3016: set_thread_flag(TIF_POLLING_NRFLAG); 3017: } 3018: 3019: static inline bool __must_check current_set_polling_and_test(void) 3020: { 3021: __current_set_polling(); 3022: 3023: /* 3024: * Polling state must be visible before we test NEED_RESCHED, 3025: * paired by resched_curr() 3026: */ 3027: smp_mb__after_atomic(); 3028: 3029: return unlikely(tif_need_resched()); 3030: } 3031: 3032: static inline void __current_clr_polling(void) 3033: { 3034: clear_thread_flag(TIF_POLLING_NRFLAG); 3035: } 3036: 3037: static inline bool __must_check current_clr_polling_and_test(void) 3038: { 3039: __current_clr_polling(); 3040: 3041: /* 3042: * Polling state must be visible before we test NEED_RESCHED, 3043: * paired by resched_curr() 3044: */ 3045: smp_mb__after_atomic(); 3046: 3047: return unlikely(tif_need_resched()); 3048: } 3049: 3050: #else 3051: static inline int tsk_is_polling(struct task_struct *p) { return 0; } 3052: static inline void __current_set_polling(void) { } 3053: static inline void __current_clr_polling(void) { } 3054: 3055: static inline bool __must_check current_set_polling_and_test(void) 3056: { 3057: return unlikely(tif_need_resched()); 3058: } 3059: static inline bool __must_check current_clr_polling_and_test(void) 3060: { 3061: return unlikely(tif_need_resched()); 3062: } 3063: #endif 3064: 3065: static inline void current_clr_polling(void) 3066: { 3067: __current_clr_polling(); 3068: 3069: /* 3070: * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. 3071: * Once the bit is cleared, we'll get IPIs with every new 3072: * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also 3073: * fold. 3074: */ 3075: smp_mb(); /* paired with resched_curr() */ 3076: 3077: preempt_fold_need_resched(); 3078: } 3079: 3080: static __always_inline bool need_resched(void) 3081: { 3082: return unlikely(tif_need_resched()); 3083: } 3084: 3085: /* 3086: * Thread group CPU time accounting. 3087: */ 3088: void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 3089: void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 3090: 3091: /* 3092: * Reevaluate whether the task has signals pending delivery. 3093: * Wake the task if so. 3094: * This is required every time the blocked sigset_t changes. 3095: * callers must hold sighand->siglock. 3096: */ 3097: extern void recalc_sigpending_and_wake(struct task_struct *t); 3098: extern void recalc_sigpending(void); 3099: 3100: extern void signal_wake_up_state(struct task_struct *t, unsigned int state); 3101: 3102: static inline void signal_wake_up(struct task_struct *t, bool resume) 3103: { 3104: signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); 3105: } 3106: static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) 3107: { 3108: signal_wake_up_state(t, resume ? __TASK_TRACED : 0); 3109: } 3110: 3111: /* 3112: * Wrappers for p->thread_info->cpu access. No-op on UP. 
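 * Illustrative only: task_cpu()/task_node() let callers stay topology
 * aware without poking at thread_info, e.g. (buf and size are
 * placeholders)
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, task_node(p));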
3113: */ 3114: #ifdef CONFIG_SMP 3115: 3116: static inline unsigned int task_cpu(const struct task_struct *p) 3117: { 3118: return task_thread_info(p)->cpu; 3119: } 3120: 3121: static inline int task_node(const struct task_struct *p) 3122: { 3123: return cpu_to_node(task_cpu(p)); 3124: } 3125: 3126: extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 3127: 3128: #else 3129: 3130: static inline unsigned int task_cpu(const struct task_struct *p) 3131: { 3132: return 0; 3133: } 3134: 3135: static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 3136: { 3137: } 3138: 3139: #endif /* CONFIG_SMP */ 3140: 3141: extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 3142: extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 3143: 3144: #ifdef CONFIG_CGROUP_SCHED 3145: extern struct task_group root_task_group; 3146: #endif /* CONFIG_CGROUP_SCHED */ 3147: 3148: extern int task_can_switch_user(struct user_struct *up, 3149: struct task_struct *tsk); 3150: 3151: #ifdef CONFIG_TASK_XACCT 3152: static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 3153: { 3154: tsk->ioac.rchar += amt; 3155: } 3156: 3157: static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 3158: { 3159: tsk->ioac.wchar += amt; 3160: } 3161: 3162: static inline void inc_syscr(struct task_struct *tsk) 3163: { 3164: tsk->ioac.syscr++; 3165: } 3166: 3167: static inline void inc_syscw(struct task_struct *tsk) 3168: { 3169: tsk->ioac.syscw++; 3170: } 3171: #else 3172: static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 3173: { 3174: } 3175: 3176: static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 3177: { 3178: } 3179: 3180: static inline void inc_syscr(struct task_struct *tsk) 3181: { 3182: } 3183: 3184: static inline void inc_syscw(struct task_struct *tsk) 3185: { 3186: } 3187: #endif 3188: 3189: #ifndef TASK_SIZE_OF 3190: #define TASK_SIZE_OF(tsk) TASK_SIZE 3191: #endif 3192: 3193: #ifdef CONFIG_MEMCG 3194: extern void mm_update_next_owner(struct mm_struct *mm); 3195: #else 3196: static inline void mm_update_next_owner(struct mm_struct *mm) 3197: { 3198: } 3199: #endif /* CONFIG_MEMCG */ 3200: 3201: static inline unsigned long task_rlimit(const struct task_struct *tsk, 3202: unsigned int limit) 3203: { 3204: return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); 3205: } 3206: 3207: static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 3208: unsigned int limit) 3209: { 3210: return READ_ONCE(tsk->signal->rlim[limit].rlim_max); 3211: } 3212: 3213: static inline unsigned long rlimit(unsigned int limit) 3214: { 3215: return task_rlimit(current, limit); 3216: } 3217: 3218: static inline unsigned long rlimit_max(unsigned int limit) 3219: { 3220: return task_rlimit_max(current, limit); 3221: } 3222: 3223: #endif
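
/*
 * Usage note (illustrative): the rlimit helpers above read the calling
 * task's current limits, e.g. a stack-growing path might check
 *
 *	if (size > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */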