| author | Philip Guenther <guenther@cvs.openbsd.org> | 2013-08-13 05:52:28 +0000 |
| --- | --- | --- |
| committer | Philip Guenther <guenther@cvs.openbsd.org> | 2013-08-13 05:52:28 +0000 |
| commit | f2b95a621a9fd41d222844eda237f607eb14d49c (patch) | |
| tree | 38d62981cd1caedc3a6a01713c2e4266107b7220 /sys/kern/kern_time.c | |
| parent | bee789b12305f3c5e594fe03c965e0786969eeb7 (diff) | |
Switch time_t, ino_t, clock_t, and struct kevent's ident and data
members to 64-bit types. Assign new syscall numbers for (almost
all) the syscalls that involve the affected types, including anything
with time_t, timeval, itimerval, timespec, rusage, dirent, stat,
or kevent arguments. Add a d_off member to struct dirent and replace
getdirentries() with getdents(), thus immensely simplifying and
accelerating telldir/seekdir. Build perl with -DBIG_TIME.
Bump the major on every single base library: the compat bits included
here are only good enough to make the transition; the T32 compat
option will be burned as soon as we've reached the new world and
are happy with the snapshots for all architectures.
DANGER: ABI incompatibility. Updating to this kernel requires extra
work or you won't be able to login: install a snapshot instead.
Much assistance in fixing userland issues from deraadt@ and tedu@
and build assistance from todd@ and otto@
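
The T32 compat shims in the diff below shuttle between the kernel's now 64-bit struct timespec/timeval/itimerval and their old 32-bit layouts through conversion macros such as TIMESPEC_TO_32 and TIMESPEC_FROM_32, whose definitions live outside kern_time.c and are not part of this commit. As orientation only, here is a minimal sketch of what such a 32-bit shim layer could look like; the struct timespec32 layout and the macro bodies are assumptions for illustration, not the actual OpenBSD definitions.

```c
/*
 * Hypothetical sketch of a 32-bit time compat layer.  The real
 * timespec32 / TIMESPEC_*_32 definitions are not shown in this diff;
 * field widths and names below are assumed for illustration.
 */
#include <sys/types.h>

struct timespec32 {
	int32_t	tv_sec;		/* assumed: old 32-bit time_t */
	int32_t	tv_nsec;
};

#define	TIMESPEC_TO_32(ts32, ts) do {					\
	(ts32)->tv_sec = (ts)->tv_sec;	/* truncates after 2038 */	\
	(ts32)->tv_nsec = (ts)->tv_nsec;				\
} while (0)

#define	TIMESPEC_FROM_32(ts, ts32) do {					\
	(ts)->tv_sec = (ts32)->tv_sec;					\
	(ts)->tv_nsec = (ts32)->tv_nsec;				\
} while (0)
```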
Diffstat (limited to 'sys/kern/kern_time.c')
-rw-r--r-- | sys/kern/kern_time.c | 343
1 file changed, 338 insertions, 5 deletions
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 99ea70af6d1..72c032f18d7 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_time.c,v 1.80 2013/06/17 19:11:54 guenther Exp $	*/
+/*	$OpenBSD: kern_time.c,v 1.81 2013/08/13 05:52:23 guenther Exp $	*/
 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
 
 /*
@@ -81,11 +81,10 @@ settime(struct timespec *ts)
 	 * the time past the cutoff, it will take a very long time
 	 * to get to the wrap point.
 	 *
-	 * XXX: we check against INT_MAX since on 64-bit
-	 * platforms, sizeof(int) != sizeof(long) and
-	 * time_t is 32 bits even when atv.tv_sec is 64 bits.
+	 * XXX: we check against UINT_MAX until we can figure out
+	 * how to deal with the hardware RTCs.
 	 */
-	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
+	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
 		printf("denied attempt to set clock forward to %lld\n",
 		    (long long)ts->tv_sec);
 		return (EPERM);
@@ -774,3 +773,337 @@ ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
 
 	return (rv);
 }
+
+#ifdef T32
+int
+t32_sys_clock_gettime(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_clock_gettime_args /* {
+		syscallarg(clockid_t) clock_id;
+		syscallarg(struct timespec32 *) tp;
+	} */ *uap = v;
+	struct timespec ats;
+	struct timespec32 ats32;
+	int error;
+
+	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
+		return (error);
+	TIMESPEC_TO_32(&ats32, &ats);
+	return (copyout(&ats32, SCARG(uap, tp), sizeof(ats32)));
+}
+
+int
+t32_sys_clock_settime(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_clock_settime_args /* {
+		syscallarg(clockid_t) clock_id;
+		syscallarg(const struct timespec32 *) tp;
+	} */ *uap = v;
+	struct timespec ats;
+	struct timespec32 ats32;
+	clockid_t clock_id;
+	int error;
+
+	if ((error = suser(p, 0)) != 0)
+		return (error);
+
+	if ((error = copyin(SCARG(uap, tp), &ats32, sizeof(ats32))) != 0)
+		return (error);
+	TIMESPEC_FROM_32(&ats, &ats32);
+
+	clock_id = SCARG(uap, clock_id);
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		if ((error = settime(&ats)) != 0)
+			return (error);
+		break;
+	default:	/* Other clocks are read-only */
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+int
+t32_sys_clock_getres(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_clock_getres_args /* {
+		syscallarg(clockid_t) clock_id;
+		syscallarg(struct timespec32 *) tp;
+	} */ *uap = v;
+	clockid_t clock_id;
+	struct timespec32 ts;
+	int error = 0;
+
+	clock_id = SCARG(uap, clock_id);
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+		ts.tv_sec = 0;
+		ts.tv_nsec = 1000000000 / hz;
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	if (SCARG(uap, tp)) {
+		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
+#ifdef KTRACE
+		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
+			KERNEL_LOCK();
+			ktrreltimespec(p, &ts);
+			KERNEL_UNLOCK();
+		}
+#endif
+	}
+
+	return error;
+}
+#endif /* T32 */
+
+#if defined(T32) || defined(COMPAT_LINUX)
+int
+t32_sys_nanosleep(struct proc *p, void *v, register_t *retval)
+{
+	static int nanowait;
+	struct t32_sys_nanosleep_args/* {
+		syscallarg(const struct timespec32 *) rqtp;
+		syscallarg(struct timespec32 *) rmtp;
+	} */ *uap = v;
+	struct timespec32 ts32;
+	struct timespec rqt, rmt;
+	struct timespec sts, ets;
+	struct timespec32 *rmtp;
+	struct timeval tv;
+	int error, error1;
+
+	rmtp = SCARG(uap, rmtp);
+	error = copyin(SCARG(uap, rqtp), &ts32, sizeof(ts32));
+	if (error)
+		return (error);
+	TIMESPEC_FROM_32(&rqt, &ts32);
+#ifdef KTRACE
+	if (KTRPOINT(p, KTR_STRUCT)) {
+		KERNEL_LOCK();
+		ktrreltimespec(p, &rqt);
+		KERNEL_UNLOCK();
+	}
+#endif
+
+	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
+	if (itimerfix(&tv))
+		return (EINVAL);
+
+	if (rmtp)
+		getnanouptime(&sts);
+
+	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
+	    MAX(1, tvtohz(&tv)));
+	if (error == ERESTART)
+		error = EINTR;
+	if (error == EWOULDBLOCK)
+		error = 0;
+
+	if (rmtp) {
+		getnanouptime(&ets);
+
+		timespecsub(&ets, &sts, &sts);
+		timespecsub(&rqt, &sts, &rmt);
+
+		if (rmt.tv_sec < 0)
+			timespecclear(&rmt);
+
+		TIMESPEC_TO_32(&ts32, &rmt);
+		error1 = copyout(&ts32, rmtp, sizeof(ts32));
+		if (error1 != 0)
+			error = error1;
+#ifdef KTRACE
+		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
+			KERNEL_LOCK();
+			ktrreltimespec(p, &rmt);
+			KERNEL_UNLOCK();
+		}
+#endif
+	}
+
+	return error;
+}
+
+/* ARGSUSED */
+int
+t32_sys_gettimeofday(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_gettimeofday_args /* {
+		syscallarg(struct timeval32 *) tp;
+		syscallarg(struct timezone *) tzp;
+	} */ *uap = v;
+	struct timeval atv;
+	struct timeval32 atv32;
+	struct timeval32 *tp;
+	struct timezone *tzp;
+	int error = 0;
+
+	tp = SCARG(uap, tp);
+	tzp = SCARG(uap, tzp);
+
+	if (tp) {
+		microtime(&atv);
+		TIMEVAL_TO_32(&atv32, &atv);
+		if ((error = copyout(&atv32, tp, sizeof (atv32))))
+			return (error);
+#ifdef KTRACE
+		if (KTRPOINT(p, KTR_STRUCT)) {
+			KERNEL_LOCK();
+			ktrabstimeval(p, &atv);
+			KERNEL_UNLOCK();
+		}
+#endif
+	}
+	if (tzp)
+		error = copyout(&tz, tzp, sizeof (tz));
+	return (error);
+}
+
+/* ARGSUSED */
+int
+t32_sys_settimeofday(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_settimeofday_args /* {
+		syscallarg(const struct timeval32 *) tv;
+		syscallarg(const struct timezone *) tzp;
+	} */ *uap = v;
+	struct timezone atz;
+	struct timeval32 atv;
+	const struct timeval32 *tv;
+	const struct timezone *tzp;
+	int error;
+
+	tv = SCARG(uap, tv);
+	tzp = SCARG(uap, tzp);
+
+	if ((error = suser(p, 0)))
+		return (error);
+	/* Verify all parameters before changing time. */
+	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
+		return (error);
+	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
+		return (error);
+	if (tv) {
+		struct timespec ts;
+
+		TIMESPEC_FROM_TIMEVAL32(&ts, &atv);
+		if ((error = settime(&ts)) != 0)
+			return (error);
+	}
+	if (tzp)
+		tz = atz;
+	return (0);
+}
+
+int
+t32_sys_getitimer(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_getitimer_args /* {
+		syscallarg(int) which;
+		syscallarg(struct itimerval32 *) itv;
+	} */ *uap = v;
+	struct itimerval aitv;
+	struct itimerval32 aitv32;
+	int s;
+	int which;
+
+	which = SCARG(uap, which);
+
+	if (which < ITIMER_REAL || which > ITIMER_PROF)
+		return (EINVAL);
+	s = splclock();
+	aitv = p->p_p->ps_timer[which];
+
+	if (which == ITIMER_REAL) {
+		struct timeval now;
+
+		getmicrouptime(&now);
+		/*
+		 * Convert from absolute to relative time in .it_value
+		 * part of real time timer. If time for real time timer
+		 * has passed return 0, else return difference between
+		 * current time and time for the timer to go off.
+		 */
+		if (timerisset(&aitv.it_value)) {
+			if (timercmp(&aitv.it_value, &now, <))
+				timerclear(&aitv.it_value);
+			else
+				timersub(&aitv.it_value, &now,
+				    &aitv.it_value);
+		}
+	}
+	splx(s);
+	ITIMERVAL_TO_32(&aitv32, &aitv);
+	return (copyout(&aitv32, SCARG(uap, itv), sizeof(aitv32)));
+}
+
+/* ARGSUSED */
+int
+t32_sys_setitimer(struct proc *p, void *v, register_t *retval)
+{
+	struct t32_sys_setitimer_args /* {
+		syscallarg(int) which;
+		syscallarg(const struct itimerval32 *) itv;
+		syscallarg(struct itimerval32 *) oitv;
+	} */ *uap = v;
+	struct t32_sys_getitimer_args getargs;
+	struct itimerval aitv;
+	struct itimerval32 aitv32;
+	const struct itimerval32 *itvp;
+	struct itimerval32 *oitv;
+	struct process *pr = p->p_p;
+	int error;
+	int timo;
+	int which;
+
+	which = SCARG(uap, which);
+	itvp = SCARG(uap, itv);
+	oitv = SCARG(uap, oitv);
+
+	if (which < ITIMER_REAL || which > ITIMER_PROF)
+		return (EINVAL);
+	if (itvp && (error = copyin(itvp, &aitv32, sizeof(aitv32))))
+		return (error);
+	if (oitv != NULL) {
+		SCARG(&getargs, which) = which;
+		SCARG(&getargs, itv) = oitv;
+		if ((error = t32_sys_getitimer(p, &getargs, retval)))
+			return (error);
+	}
+	if (itvp == 0)
+		return (0);
+	ITIMERVAL_FROM_32(&aitv, &aitv32);
+	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
+		return (EINVAL);
+	if (which == ITIMER_REAL) {
+		struct timeval ctv;
+
+		timeout_del(&pr->ps_realit_to);
+		getmicrouptime(&ctv);
+		if (timerisset(&aitv.it_value)) {
+			timo = tvtohz(&aitv.it_value);
+			timeout_add(&pr->ps_realit_to, timo);
+			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
+		}
+		pr->ps_timer[ITIMER_REAL] = aitv;
+	} else {
+		int s;
+
+		itimerround(&aitv.it_interval);
+		s = splclock();
+		pr->ps_timer[which] = aitv;
+		if (which == ITIMER_VIRTUAL)
+			timeout_del(&pr->ps_virt_to);
+		if (which == ITIMER_PROF)
+			timeout_del(&pr->ps_prof_to);
+		splx(s);
+	}
+
+	return (0);
+}
+#endif /* defined(T32) || defined(COMPAT_LINUX) */
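
A side note on the settime() hunk near the top of the diff: the guard that rejects setting the clock too far forward moves from an INT_MAX-based bound to a UINT_MAX-based one, in both cases leaving one year of seconds of headroom before the wrap point. The small userland program below is an illustration only, not part of the commit; on platforms where time_t is still 32 bits the second value overflows, and the exact dates printed depend on the local libc.

```c
#include <stdio.h>
#include <time.h>
#include <limits.h>

int
main(void)
{
	/* One year of seconds, as subtracted in the kernel check. */
	long long year = 365LL * 24 * 60 * 60;
	long long old_cut = (long long)INT_MAX - year;   /* pre-change bound */
	long long new_cut = (long long)UINT_MAX - year;  /* post-change bound */
	char buf[64];
	time_t t;

	t = (time_t)old_cut;
	strftime(buf, sizeof(buf), "%Y-%m-%d", gmtime(&t));
	printf("old cutoff: %lld (%s)\n", old_cut, buf);

	t = (time_t)new_cut;
	strftime(buf, sizeof(buf), "%Y-%m-%d", gmtime(&t));
	printf("new cutoff: %lld (%s)\n", new_cut, buf);
	return 0;
}
```

On a 64-bit time_t system this prints a cutoff shortly before the 2038 signed-32-bit wrap for the old bound and one in the early 2100s for the new UINT_MAX-based bound, which is the practical effect of the changed check.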