Diffstat (limited to 'sys-kernel/linux-image-redcore/files/5.4-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch')
-rw-r--r-- sys-kernel/linux-image-redcore/files/5.4-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch | 1435
1 file changed, 1435 insertions(+), 0 deletions(-)
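
Every hunk below applies the same mechanical substitution across many drivers:
schedule_timeout() sleeps at timer-tick (jiffy) resolution, so schedule_timeout(1)
rounds up to anywhere from 1 ms to 10 ms depending on CONFIG_HZ, whereas the
hrtimer-backed replacements (added by an earlier patch in this series, not shown
here) sleep for the requested wall-clock time, with roughly 1 ms as the minimum.
A minimal sketch of the call-site pattern:

    /* Before: one-jiffy sleep, 1-10 ms depending on CONFIG_HZ */
    set_current_state(TASK_INTERRUPTIBLE);
    schedule_timeout(1);

    /* After: hrtimer-backed sleep with a ~1 ms minimum, independent of HZ */
    set_current_state(TASK_INTERRUPTIBLE);
    schedule_min_hrtimeout();

    /* Millisecond timeouts drop the msecs_to_jiffies() conversion: */
    schedule_timeout(msecs_to_jiffies(10));   /* before */
    schedule_msec_hrtimeout(10);              /* after */

The doubled parentheses visible in several hunks, e.g. schedule_msec_hrtimeout((25)),
are harmless leftovers of the scripted conversion.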
diff --git a/sys-kernel/linux-image-redcore/files/5.4-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-image-redcore/files/5.4-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
new file mode 100644
index 00000000..be5fa3d2
--- /dev/null
+++ b/sys-kernel/linux-image-redcore/files/5.4-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
@@ -0,0 +1,1435 @@
+From 688c8d0716e6598dd7c25c89d4699704a3337bd5 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:28:30 +1100
+Subject: [PATCH 07/16] Replace all schedule_timeout(1) with
+ schedule_min_hrtimeout()
+
+---
+ drivers/block/swim.c | 6 +-
+ drivers/char/ipmi/ipmi_msghandler.c | 2 +-
+ drivers/char/ipmi/ipmi_ssif.c | 2 +-
+ drivers/char/snsc.c | 469 ++++++++++++++++++
+ drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-streams.c | 2 +-
+ drivers/mfd/ucb1x00-core.c | 2 +-
+ drivers/misc/sgi-xp/xpc_channel.c | 2 +-
+ drivers/net/caif/caif_hsi.c | 2 +-
+ drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +-
+ drivers/net/usb/lan78xx.c | 2 +-
+ drivers/net/usb/usbnet.c | 2 +-
+ drivers/scsi/fnic/fnic_scsi.c | 4 +-
+ drivers/scsi/snic/snic_scsi.c | 2 +-
+ .../staging/comedi/drivers/ni_mio_common.c | 2 +-
+ drivers/staging/lustre/lnet/lnet/lib-eq.c | 426 ++++++++++++++++
+ drivers/staging/rts5208/rtsx.c | 2 +-
+ drivers/staging/speakup/speakup_acntpc.c | 4 +-
+ drivers/staging/speakup/speakup_apollo.c | 2 +-
+ drivers/staging/speakup/speakup_decext.c | 2 +-
+ drivers/staging/speakup/speakup_decpc.c | 2 +-
+ drivers/staging/speakup/speakup_dectlk.c | 2 +-
+ drivers/staging/speakup/speakup_dtlk.c | 4 +-
+ drivers/staging/speakup/speakup_keypc.c | 4 +-
+ drivers/staging/speakup/synth.c | 14 +-
+ .../staging/unisys/visornic/visornic_main.c | 6 +-
+ drivers/video/fbdev/omap/hwa742.c | 2 +-
+ drivers/video/fbdev/pxafb.c | 2 +-
+ fs/btrfs/inode-map.c | 2 +-
+ sound/usb/line6/pcm.c | 2 +-
+ 32 files changed, 936 insertions(+), 47 deletions(-)
+ create mode 100644 drivers/char/snsc.c
+ create mode 100644 drivers/staging/lustre/lnet/lnet/lib-eq.c
+
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index 4c297f69171d..5bc4f1be2617 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
+ if (swim_readbit(base, MOTOR_ON))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ } else if (action == OFF) {
+ swim_action(base, MOTOR_OFF);
+@@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
+ if (!swim_readbit(base, DISK_IN))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ swim_select(base, RELAX);
+ }
+@@ -371,7 +371,7 @@ static inline int swim_step(struct swim __iomem *base)
+ for (wait = 0; wait < HZ; wait++) {
+
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ swim_select(base, RELAX);
+ if (!swim_readbit(base, STEP))
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 2aab80e19ae0..6200dbb3b5ef 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3544,7 +3544,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ /* No need for locks, the interface is down. */
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 22c6a2e61236..c4bccd444cbf 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1289,7 +1289,7 @@ static void shutdown_ssif(void *send_info)
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->watch_timer);
+diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
+new file mode 100644
+index 000000000000..5228e78df804
+--- /dev/null
++++ b/drivers/char/snsc.c
+@@ -0,0 +1,469 @@
++/*
++ * SN Platform system controller communication support
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
++ */
++
++/*
++ * System controller communication driver
++ *
++ * This driver allows a user process to communicate with the system
++ * controller (a.k.a. "IRouter") network in an SGI SN system.
++ */
++
++#include <linux/interrupt.h>
++#include <linux/sched/signal.h>
++#include <linux/device.h>
++#include <linux/poll.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <asm/sn/io.h>
++#include <asm/sn/sn_sal.h>
++#include <asm/sn/module.h>
++#include <asm/sn/geo.h>
++#include <asm/sn/nodepda.h>
++#include "snsc.h"
++
++#define SYSCTL_BASENAME "snsc"
++
++#define SCDRV_BUFSZ 2048
++#define SCDRV_TIMEOUT 1000
++
++static DEFINE_MUTEX(scdrv_mutex);
++static irqreturn_t
++scdrv_interrupt(int irq, void *subch_data)
++{
++ struct subch_data_s *sd = subch_data;
++ unsigned long flags;
++ int status;
++
++ spin_lock_irqsave(&sd->sd_rlock, flags);
++ spin_lock(&sd->sd_wlock);
++ status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
++
++ if (status > 0) {
++ if (status & SAL_IROUTER_INTR_RECV) {
++ wake_up(&sd->sd_rq);
++ }
++ if (status & SAL_IROUTER_INTR_XMIT) {
++ ia64_sn_irtr_intr_disable
++ (sd->sd_nasid, sd->sd_subch,
++ SAL_IROUTER_INTR_XMIT);
++ wake_up(&sd->sd_wq);
++ }
++ }
++ spin_unlock(&sd->sd_wlock);
++ spin_unlock_irqrestore(&sd->sd_rlock, flags);
++ return IRQ_HANDLED;
++}
++
++/*
++ * scdrv_open
++ *
++ * Reserve a subchannel for system controller communication.
++ */
++
++static int
++scdrv_open(struct inode *inode, struct file *file)
++{
++ struct sysctl_data_s *scd;
++ struct subch_data_s *sd;
++ int rv;
++
++ /* look up device info for this device file */
++ scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
++
++ /* allocate memory for subchannel data */
++ sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
++ if (sd == NULL) {
++ printk("%s: couldn't allocate subchannel data\n",
++ __func__);
++ return -ENOMEM;
++ }
++
++ /* initialize subch_data_s fields */
++ sd->sd_nasid = scd->scd_nasid;
++ sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
++
++ if (sd->sd_subch < 0) {
++ kfree(sd);
++ printk("%s: couldn't allocate subchannel\n", __func__);
++ return -EBUSY;
++ }
++
++ spin_lock_init(&sd->sd_rlock);
++ spin_lock_init(&sd->sd_wlock);
++ init_waitqueue_head(&sd->sd_rq);
++ init_waitqueue_head(&sd->sd_wq);
++ sema_init(&sd->sd_rbs, 1);
++ sema_init(&sd->sd_wbs, 1);
++
++ file->private_data = sd;
++
++ /* hook this subchannel up to the system controller interrupt */
++ mutex_lock(&scdrv_mutex);
++ rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
++ IRQF_SHARED, SYSCTL_BASENAME, sd);
++ if (rv) {
++ ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
++ kfree(sd);
++ printk("%s: irq request failed (%d)\n", __func__, rv);
++ mutex_unlock(&scdrv_mutex);
++ return -EBUSY;
++ }
++ mutex_unlock(&scdrv_mutex);
++ return 0;
++}
++
++/*
++ * scdrv_release
++ *
++ * Release a previously-reserved subchannel.
++ */
++
++static int
++scdrv_release(struct inode *inode, struct file *file)
++{
++ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
++ int rv;
++
++ /* free the interrupt */
++ free_irq(SGI_UART_VECTOR, sd);
++
++ /* ask SAL to close the subchannel */
++ rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
++
++ kfree(sd);
++ return rv;
++}
++
++/*
++ * scdrv_read
++ *
++ * Called to read bytes from the open IRouter pipe.
++ *
++ */
++
++static inline int
++read_status_check(struct subch_data_s *sd, int *len)
++{
++ return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len);
++}
++
++static ssize_t
++scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
++{
++ int status;
++ int len;
++ unsigned long flags;
++ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
++
++ /* try to get control of the read buffer */
++ if (down_trylock(&sd->sd_rbs)) {
++ /* somebody else has it now;
++ * if we're non-blocking, then exit...
++ */
++ if (file->f_flags & O_NONBLOCK) {
++ return -EAGAIN;
++ }
++ /* ...or if we want to block, then do so here */
++ if (down_interruptible(&sd->sd_rbs)) {
++ /* something went wrong with wait */
++ return -ERESTARTSYS;
++ }
++ }
++
++ /* anything to read? */
++ len = CHUNKSIZE;
++ spin_lock_irqsave(&sd->sd_rlock, flags);
++ status = read_status_check(sd, &len);
++
++ /* if not, and we're blocking I/O, loop */
++ while (status < 0) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ if (file->f_flags & O_NONBLOCK) {
++ spin_unlock_irqrestore(&sd->sd_rlock, flags);
++ up(&sd->sd_rbs);
++ return -EAGAIN;
++ }
++
++ len = CHUNKSIZE;
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_wait_queue(&sd->sd_rq, &wait);
++ spin_unlock_irqrestore(&sd->sd_rlock, flags);
++
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
++
++ remove_wait_queue(&sd->sd_rq, &wait);
++ if (signal_pending(current)) {
++ /* wait was interrupted */
++ up(&sd->sd_rbs);
++ return -ERESTARTSYS;
++ }
++
++ spin_lock_irqsave(&sd->sd_rlock, flags);
++ status = read_status_check(sd, &len);
++ }
++ spin_unlock_irqrestore(&sd->sd_rlock, flags);
++
++ if (len > 0) {
++ /* we read something in the last read_status_check(); copy
++ * it out to user space
++ */
++ if (count < len) {
++ pr_debug("%s: only accepting %d of %d bytes\n",
++ __func__, (int) count, len);
++ }
++ len = min((int) count, len);
++ if (copy_to_user(buf, sd->sd_rb, len))
++ len = -EFAULT;
++ }
++
++ /* release the read buffer and wake anyone who might be
++ * waiting for it
++ */
++ up(&sd->sd_rbs);
++
++ /* return the number of characters read in */
++ return len;
++}
++
++/*
++ * scdrv_write
++ *
++ * Writes a chunk of an IRouter packet (or other system controller data)
++ * to the system controller.
++ *
++ */
++static inline int
++write_status_check(struct subch_data_s *sd, int count)
++{
++ return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count);
++}
++
++static ssize_t
++scdrv_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ unsigned long flags;
++ int status;
++ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
++
++ /* try to get control of the write buffer */
++ if (down_trylock(&sd->sd_wbs)) {
++ /* somebody else has it now;
++ * if we're non-blocking, then exit...
++ */
++ if (file->f_flags & O_NONBLOCK) {
++ return -EAGAIN;
++ }
++ /* ...or if we want to block, then do so here */
++ if (down_interruptible(&sd->sd_wbs)) {
++ /* something went wrong with wait */
++ return -ERESTARTSYS;
++ }
++ }
++
++ count = min((int) count, CHUNKSIZE);
++ if (copy_from_user(sd->sd_wb, buf, count)) {
++ up(&sd->sd_wbs);
++ return -EFAULT;
++ }
++
++ /* try to send the buffer */
++ spin_lock_irqsave(&sd->sd_wlock, flags);
++ status = write_status_check(sd, count);
++
++ /* if we failed, and we want to block, then loop */
++ while (status <= 0) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ if (file->f_flags & O_NONBLOCK) {
++ spin_unlock_irqrestore(&sd->sd_wlock, flags);
++ up(&sd->sd_wbs);
++ return -EAGAIN;
++ }
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_wait_queue(&sd->sd_wq, &wait);
++ spin_unlock_irqrestore(&sd->sd_wlock, flags);
++
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
++
++ remove_wait_queue(&sd->sd_wq, &wait);
++ if (signal_pending(current)) {
++ /* wait was interrupted */
++ up(&sd->sd_wbs);
++ return -ERESTARTSYS;
++ }
++
++ spin_lock_irqsave(&sd->sd_wlock, flags);
++ status = write_status_check(sd, count);
++ }
++ spin_unlock_irqrestore(&sd->sd_wlock, flags);
++
++ /* release the write buffer and wake anyone who's waiting for it */
++ up(&sd->sd_wbs);
++
++ /* return the number of characters accepted (should be the complete
++ * "chunk" as requested)
++ */
++ if ((status >= 0) && (status < count)) {
++ pr_debug("Didn't accept the full chunk; %d of %d\n",
++ status, (int) count);
++ }
++ return status;
++}
++
++static __poll_t
++scdrv_poll(struct file *file, struct poll_table_struct *wait)
++{
++ __poll_t mask = 0;
++ int status = 0;
++ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
++ unsigned long flags;
++
++ poll_wait(file, &sd->sd_rq, wait);
++ poll_wait(file, &sd->sd_wq, wait);
++
++ spin_lock_irqsave(&sd->sd_rlock, flags);
++ spin_lock(&sd->sd_wlock);
++ status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
++ spin_unlock(&sd->sd_wlock);
++ spin_unlock_irqrestore(&sd->sd_rlock, flags);
++
++ if (status > 0) {
++ if (status & SAL_IROUTER_INTR_RECV) {
++ mask |= EPOLLIN | EPOLLRDNORM;
++ }
++ if (status & SAL_IROUTER_INTR_XMIT) {
++ mask |= EPOLLOUT | EPOLLWRNORM;
++ }
++ }
++
++ return mask;
++}
++
++static const struct file_operations scdrv_fops = {
++ .owner = THIS_MODULE,
++ .read = scdrv_read,
++ .write = scdrv_write,
++ .poll = scdrv_poll,
++ .open = scdrv_open,
++ .release = scdrv_release,
++ .llseek = noop_llseek,
++};
++
++static struct class *snsc_class;
++
++/*
++ * scdrv_init
++ *
++ * Called at boot time to initialize the system controller communication
++ * facility.
++ */
++int __init
++scdrv_init(void)
++{
++ geoid_t geoid;
++ cnodeid_t cnode;
++ char devname[32];
++ char *devnamep;
++ struct sysctl_data_s *scd;
++ void *salbuf;
++ dev_t first_dev, dev;
++ nasid_t event_nasid;
++
++ if (!ia64_platform_is("sn2"))
++ return -ENODEV;
++
++ event_nasid = ia64_sn_get_console_nasid();
++
++ snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
++ if (IS_ERR(snsc_class)) {
++ printk("%s: failed to allocate class\n", __func__);
++ return PTR_ERR(snsc_class);
++ }
++
++ if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
++ SYSCTL_BASENAME) < 0) {
++ printk("%s: failed to register SN system controller device\n",
++ __func__);
++ return -ENODEV;
++ }
++
++ for (cnode = 0; cnode < num_cnodes; cnode++) {
++ geoid = cnodeid_get_geoid(cnode);
++ devnamep = devname;
++ format_module_id(devnamep, geo_module(geoid),
++ MODULE_FORMAT_BRIEF);
++ devnamep = devname + strlen(devname);
++ sprintf(devnamep, "^%d#%d", geo_slot(geoid),
++ geo_slab(geoid));
++
++ /* allocate sysctl device data */
++ scd = kzalloc(sizeof (struct sysctl_data_s),
++ GFP_KERNEL);
++ if (!scd) {
++ printk("%s: failed to allocate device info"
++ "for %s/%s\n", __func__,
++ SYSCTL_BASENAME, devname);
++ continue;
++ }
++
++ /* initialize sysctl device data fields */
++ scd->scd_nasid = cnodeid_to_nasid(cnode);
++ if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
++ printk("%s: failed to allocate driver buffer"
++ "(%s%s)\n", __func__,
++ SYSCTL_BASENAME, devname);
++ kfree(scd);
++ continue;
++ }
++
++ if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
++ SCDRV_BUFSZ) < 0) {
++ printk
++ ("%s: failed to initialize SAL for"
++ " system controller communication"
++ " (%s/%s): outdated PROM?\n",
++ __func__, SYSCTL_BASENAME, devname);
++ kfree(scd);
++ kfree(salbuf);
++ continue;
++ }
++
++ dev = first_dev + cnode;
++ cdev_init(&scd->scd_cdev, &scdrv_fops);
++ if (cdev_add(&scd->scd_cdev, dev, 1)) {
++ printk("%s: failed to register system"
++ " controller device (%s%s)\n",
++ __func__, SYSCTL_BASENAME, devname);
++ kfree(scd);
++ kfree(salbuf);
++ continue;
++ }
++
++ device_create(snsc_class, NULL, dev, NULL,
++ "%s", devname);
++
++ ia64_sn_irtr_intr_enable(scd->scd_nasid,
++ 0 /*ignored */ ,
++ SAL_IROUTER_INTR_RECV);
++
++ /* on the console nasid, prepare to receive
++ * system controller environmental events
++ */
++ if(scd->scd_nasid == event_nasid) {
++ scdrv_event_init(scd);
++ }
++ }
++ return 0;
++}
++device_initcall(scdrv_init);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index e5252ef3812f..6ae6241185ea 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -237,7 +237,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index 75f3efee21a4..09b1932ce85b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -203,7 +203,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ break;
+ }
+ if (lazy)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
+index 137853944e46..76830892f373 100644
+--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
+@@ -1137,7 +1137,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+- schedule_timeout(msecs_to_jiffies(25));
++ schedule_msec_hrtimeout((25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
+diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
+index f7de9118f609..f39ad2952c0f 100644
+--- a/drivers/media/pci/ivtv/ivtv-streams.c
++++ b/drivers/media/pci/ivtv/ivtv-streams.c
+@@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ time_before(jiffies,
+ then + msecs_to_jiffies(2000))) {
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ }
+
+ /* To convert jiffies to ms, we must multiply by 1000
+diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
+index b690796d24d4..448b13da62b4 100644
+--- a/drivers/mfd/ucb1x00-core.c
++++ b/drivers/mfd/ucb1x00-core.c
+@@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
+ break;
+ /* yield to other processes */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ return UCB_ADC_DAT(val);
+diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
+index 8e6607fc8a67..b9ab770bbdb5 100644
+--- a/drivers/misc/sgi-xp/xpc_channel.c
++++ b/drivers/misc/sgi-xp/xpc_channel.c
+@@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+- ret = schedule_timeout(1);
++ ret = schedule_min_hrtimeout();
+ finish_wait(&ch->msg_allocate_wq, &wait);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index bbb2575d4728..637757144221 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work)
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ retry--;
+ }
+
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+index d2539c95adb6..0c2f31a03ce9 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
+@@ -242,7 +242,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index f24a1b0b801f..972313b92b0a 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2676,7 +2676,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index dde05e2fdc3e..fa6c1581136e 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -767,7 +767,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 80608b53897b..84051b538fa8 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -216,7 +216,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+@@ -2273,7 +2273,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
+ }
+ }
+
+- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
+index b3650c989ed4..7ed1fb285754 100644
+--- a/drivers/scsi/snic/snic_scsi.c
++++ b/drivers/scsi/snic/snic_scsi.c
+@@ -2353,7 +2353,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+
+ /* Wait for all the IOs that are entered in Qcmd */
+ while (atomic_read(&snic->ios_inflight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index f98e3ae27bff..0741c8352a6d 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -4742,7 +4742,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
+ if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (schedule_timeout(1))
++ if (schedule_min_hrtimeout())
+ return -EIO;
+ }
+ if (i == timeout) {
+diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+new file mode 100644
+index 000000000000..8cca151741b2
+--- /dev/null
++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+@@ -0,0 +1,426 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * GPL HEADER START
++ *
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 only,
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License version 2 for more details (a copy is included
++ * in the LICENSE file that accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License
++ * version 2 along with this program; If not, see
++ * http://www.gnu.org/licenses/gpl-2.0.html
++ *
++ * GPL HEADER END
++ */
++/*
++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
++ * Use is subject to license terms.
++ *
++ * Copyright (c) 2012, Intel Corporation.
++ */
++/*
++ * This file is part of Lustre, http://www.lustre.org/
++ * Lustre is a trademark of Sun Microsystems, Inc.
++ *
++ * lnet/lnet/lib-eq.c
++ *
++ * Library level Event queue management routines
++ */
++
++#define DEBUG_SUBSYSTEM S_LNET
++
++#include <linux/lnet/lib-lnet.h>
++
++/**
++ * Create an event queue that has room for \a count number of events.
++ *
++ * The event queue is circular and older events will be overwritten by new
++ * ones if they are not removed in time by the user using the functions
++ * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
++ * determine the appropriate size of the event queue to prevent this loss
++ * of events. Note that when EQ handler is specified in \a callback, no
++ * event loss can happen, since the handler is run for each event deposited
++ * into the EQ.
++ *
++ * \param count The number of events to be stored in the event queue. It
++ * will be rounded up to the next power of two.
++ * \param callback A handler function that runs when an event is deposited
++ * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
++ * indicate that no event handler is desired.
++ * \param handle On successful return, this location will hold a handle for
++ * the newly created EQ.
++ *
++ * \retval 0 On success.
++ * \retval -EINVAL If an parameter is not valid.
++ * \retval -ENOMEM If memory for the EQ can't be allocated.
++ *
++ * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
++ */
++int
++LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
++ struct lnet_handle_eq *handle)
++{
++ struct lnet_eq *eq;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ /*
++ * We need count to be a power of 2 so that when eq_{enq,deq}_seq
++ * overflow, they don't skip entries, so the queue has the same
++ * apparent capacity at all times
++ */
++ if (count)
++ count = roundup_pow_of_two(count);
++
++ if (callback != LNET_EQ_HANDLER_NONE && count)
++ CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
++
++ /*
++ * count can be 0 if only need callback, we can eliminate
++ * overhead of enqueue event
++ */
++ if (!count && callback == LNET_EQ_HANDLER_NONE)
++ return -EINVAL;
++
++ eq = kzalloc(sizeof(*eq), GFP_NOFS);
++ if (!eq)
++ return -ENOMEM;
++
++ if (count) {
++ eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!eq->eq_events)
++ goto failed;
++ /*
++ * NB allocator has set all event sequence numbers to 0,
++ * so all them should be earlier than eq_deq_seq
++ */
++ }
++
++ eq->eq_deq_seq = 1;
++ eq->eq_enq_seq = 1;
++ eq->eq_size = count;
++ eq->eq_callback = callback;
++
++ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
++ sizeof(*eq->eq_refs[0]));
++ if (!eq->eq_refs)
++ goto failed;
++
++ /* MUST hold both exclusive lnet_res_lock */
++ lnet_res_lock(LNET_LOCK_EX);
++ /*
++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
++ * both EQ lookup and poll event with only lnet_eq_wait_lock
++ */
++ lnet_eq_wait_lock();
++
++ lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
++ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
++
++ lnet_eq_wait_unlock();
++ lnet_res_unlock(LNET_LOCK_EX);
++
++ lnet_eq2handle(handle, eq);
++ return 0;
++
++failed:
++ kvfree(eq->eq_events);
++
++ if (eq->eq_refs)
++ cfs_percpt_free(eq->eq_refs);
++
++ kfree(eq);
++ return -ENOMEM;
++}
++EXPORT_SYMBOL(LNetEQAlloc);
++
++/**
++ * Release the resources associated with an event queue if it's idle;
++ * otherwise do nothing and it's up to the user to try again.
++ *
++ * \param eqh A handle for the event queue to be released.
++ *
++ * \retval 0 If the EQ is not in use and freed.
++ * \retval -ENOENT If \a eqh does not point to a valid EQ.
++ * \retval -EBUSY If the EQ is still in use by some MDs.
++ */
++int
++LNetEQFree(struct lnet_handle_eq eqh)
++{
++ struct lnet_eq *eq;
++ struct lnet_event *events = NULL;
++ int **refs = NULL;
++ int *ref;
++ int rc = 0;
++ int size = 0;
++ int i;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ lnet_res_lock(LNET_LOCK_EX);
++ /*
++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
++ * both EQ lookup and poll event with only lnet_eq_wait_lock
++ */
++ lnet_eq_wait_lock();
++
++ eq = lnet_handle2eq(&eqh);
++ if (!eq) {
++ rc = -ENOENT;
++ goto out;
++ }
++
++ cfs_percpt_for_each(ref, i, eq->eq_refs) {
++ LASSERT(*ref >= 0);
++ if (!*ref)
++ continue;
++
++ CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
++ i, *ref);
++ rc = -EBUSY;
++ goto out;
++ }
++
++ /* stash for free after lock dropped */
++ events = eq->eq_events;
++ size = eq->eq_size;
++ refs = eq->eq_refs;
++
++ lnet_res_lh_invalidate(&eq->eq_lh);
++ list_del(&eq->eq_list);
++ kfree(eq);
++ out:
++ lnet_eq_wait_unlock();
++ lnet_res_unlock(LNET_LOCK_EX);
++
++ kvfree(events);
++ if (refs)
++ cfs_percpt_free(refs);
++
++ return rc;
++}
++EXPORT_SYMBOL(LNetEQFree);
++
++void
++lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
++{
++ /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
++ int index;
++
++ if (!eq->eq_size) {
++ LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
++ eq->eq_callback(ev);
++ return;
++ }
++
++ lnet_eq_wait_lock();
++ ev->sequence = eq->eq_enq_seq++;
++
++ LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
++ index = ev->sequence & (eq->eq_size - 1);
++
++ eq->eq_events[index] = *ev;
++
++ if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
++ eq->eq_callback(ev);
++
++ /* Wake anyone waiting in LNetEQPoll() */
++ if (waitqueue_active(&the_lnet.ln_eq_waitq))
++ wake_up_all(&the_lnet.ln_eq_waitq);
++ lnet_eq_wait_unlock();
++}
++
++static int
++lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
++{
++ int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
++ struct lnet_event *new_event = &eq->eq_events[new_index];
++ int rc;
++
++ /* must called with lnet_eq_wait_lock hold */
++ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
++ return 0;
++
++ /* We've got a new event... */
++ *ev = *new_event;
++
++ CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
++ new_event, eq->eq_deq_seq, eq->eq_size);
++
++ /* ...but did it overwrite an event we've not seen yet? */
++ if (eq->eq_deq_seq == new_event->sequence) {
++ rc = 1;
++ } else {
++ /*
++ * don't complain with CERROR: some EQs are sized small
++ * anyway; if it's important, the caller should complain
++ */
++ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
++ eq->eq_deq_seq, new_event->sequence);
++ rc = -EOVERFLOW;
++ }
++
++ eq->eq_deq_seq = new_event->sequence + 1;
++ return rc;
++}
++
++/**
++ * A nonblocking function that can be used to get the next event in an EQ.
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully. The event is removed from the queue.
++ *
++ * \param eventq A handle for the event queue.
++ * \param event On successful return (1 or -EOVERFLOW), this location will
++ * hold the next event in the EQ.
++ *
++ * \retval 0 No pending event in the EQ.
++ * \retval 1 Indicates success.
++ * \retval -ENOENT If \a eventq does not point to a valid EQ.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ has been dropped due to limited space in the EQ.
++ */
++
++/**
++ * Block the calling process until there is an event in the EQ.
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully. This function returns the next event
++ * in the EQ and removes it from the EQ.
++ *
++ * \param eventq A handle for the event queue.
++ * \param event On successful return (1 or -EOVERFLOW), this location will
++ * hold the next event in the EQ.
++ *
++ * \retval 1 Indicates success.
++ * \retval -ENOENT If \a eventq does not point to a valid EQ.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ has been dropped due to limited space in the EQ.
++ */
++
++static int
++lnet_eq_wait_locked(int *timeout_ms, long state)
++__must_hold(&the_lnet.ln_eq_wait_lock)
++{
++ int tms = *timeout_ms;
++ int wait;
++ wait_queue_entry_t wl;
++ unsigned long now;
++
++ if (!tms)
++ return -ENXIO; /* don't want to wait and no new event */
++
++ init_waitqueue_entry(&wl, current);
++ set_current_state(state);
++ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
++
++ lnet_eq_wait_unlock();
++
++ if (tms < 0) {
++ schedule();
++ } else {
++ now = jiffies;
++ schedule_msec_hrtimeout((tms));
++ tms -= jiffies_to_msecs(jiffies - now);
++ if (tms < 0) /* no more wait but may have new event */
++ tms = 0;
++ }
++
++ wait = tms; /* might need to call here again */
++ *timeout_ms = tms;
++
++ lnet_eq_wait_lock();
++ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
++
++ return wait;
++}
++
++/**
++ * Block the calling process until there's an event from a set of EQs or
++ * timeout happens.
++ *
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully, in which case the corresponding event
++ * is consumed.
++ *
++ * LNetEQPoll() provides a timeout to allow applications to poll, block for a
++ * fixed period, or block indefinitely.
++ *
++ * \param eventqs,neq An array of EQ handles, and size of the array.
++ * \param timeout_ms Time in milliseconds to wait for an event to occur on
++ * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
++ * infinite timeout.
++ * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD
++ * \param event,which On successful return (1 or -EOVERFLOW), \a event will
++ * hold the next event in the EQs, and \a which will contain the index of the
++ * EQ from which the event was taken.
++ *
++ * \retval 0 No pending event in the EQs after timeout.
++ * \retval 1 Indicates success.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ indicated by \a which has been dropped due to limited space in the EQ.
++ * \retval -ENOENT If there's an invalid handle in \a eventqs.
++ */
++int
++LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
++ int interruptible,
++ struct lnet_event *event, int *which)
++{
++ int wait = 1;
++ int rc;
++ int i;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ if (neq < 1)
++ return -ENOENT;
++
++ lnet_eq_wait_lock();
++
++ for (;;) {
++ for (i = 0; i < neq; i++) {
++ struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]);
++
++ if (!eq) {
++ lnet_eq_wait_unlock();
++ return -ENOENT;
++ }
++
++ rc = lnet_eq_dequeue_event(eq, event);
++ if (rc) {
++ lnet_eq_wait_unlock();
++ *which = i;
++ return rc;
++ }
++ }
++
++ if (!wait)
++ break;
++
++ /*
++ * return value of lnet_eq_wait_locked:
++ * -1 : did nothing and it's sure no new event
++ * 1 : sleep inside and wait until new event
++ * 0 : don't want to wait anymore, but might have new event
++ * so need to call dequeue again
++ */
++ wait = lnet_eq_wait_locked(&timeout_ms,
++ interruptible ? TASK_INTERRUPTIBLE
++ : TASK_NOLOAD);
++ if (wait < 0) /* no new event */
++ break;
++ }
++
++ lnet_eq_wait_unlock();
++ return 0;
++}
+diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
+index fa597953e9a0..685cf842badc 100644
+--- a/drivers/staging/rts5208/rtsx.c
++++ b/drivers/staging/rts5208/rtsx.c
+@@ -490,7 +490,7 @@ static int rtsx_polling_thread(void *__dev)
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
++ schedule_msec_hrtimeout((POLLING_INTERVAL));
+
+ /* lock the device pointers */
+ mutex_lock(&dev->dev_mutex);
+diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
+index c94328a5bd4a..6e7d4671aa69 100644
+--- a/drivers/staging/speakup/speakup_acntpc.c
++++ b/drivers/staging/speakup/speakup_acntpc.c
+@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
+index 0877b4044c28..627102d048c1 100644
+--- a/drivers/staging/speakup/speakup_apollo.c
++++ b/drivers/staging/speakup/speakup_apollo.c
+@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
+index ddbb7e97d118..f9502addc765 100644
+--- a/drivers/staging/speakup/speakup_decext.c
++++ b/drivers/staging/speakup/speakup_decext.c
+@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
+index 798c42dfa16c..d85b41db67a3 100644
+--- a/drivers/staging/speakup/speakup_decpc.c
++++ b/drivers/staging/speakup/speakup_decpc.c
+@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (dt_sendchar(ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
+index dccb4ea29d37..8ecead307d04 100644
+--- a/drivers/staging/speakup/speakup_dectlk.c
++++ b/drivers/staging/speakup/speakup_dectlk.c
+@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
+index dbebed0eeeec..6d83c13ca4a6 100644
+--- a/drivers/staging/speakup/speakup_dtlk.c
++++ b/drivers/staging/speakup/speakup_dtlk.c
+@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
+index 414827e888fc..cb31c9176daa 100644
+--- a/drivers/staging/speakup/speakup_keypc.c
++++ b/drivers/staging/speakup/speakup_keypc.c
+@@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index 3568bfb89912..0a80b3b098b2 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (ch == '\n')
+ ch = synth->procspeech;
+- if (unicode)
+- ret = synth->io_ops->synth_out_unicode(synth, ch);
+- else
+- ret = synth->io_ops->synth_out(synth, ch);
+- if (!ret) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ if (!synth->io_ops->synth_out(synth, ch)) {
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+@@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth->io_ops->synth_out(synth, synth->procspeech))
+- schedule_timeout(
+- msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ else
+- schedule_timeout(
+- msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+index 1d1440d43002..52fe89ae1d9d 100644
+--- a/drivers/staging/unisys/visornic/visornic_main.c
++++ b/drivers/staging/unisys/visornic/visornic_main.c
+@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (atomic_read(&devdata->usage))
+ break;
+@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
+index cfe63932f825..71c00ef772a3 100644
+--- a/drivers/video/fbdev/omap/hwa742.c
++++ b/drivers/video/fbdev/omap/hwa742.c
+@@ -913,7 +913,7 @@ static void hwa742_resume(void)
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(5));
++ schedule_msec_hrtimeout((5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+ }
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index f70c9f79622e..0b363eaee24f 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
+ mutex_unlock(&fbi->ctrlr_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(30));
++ schedule_msec_hrtimeout((30));
+ }
+
+ pr_debug("%s(): task ending\n", __func__);
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 37345fb6191d..3874c17d1bc5 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -91,7 +91,7 @@ static int caching_kthread(void *data)
+ btrfs_release_path(path);
+ root->ino_cache_progress = last;
+ up_read(&fs_info->commit_root_sem);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ goto again;
+ } else
+ continue;
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index f70211e6b174..5ae4421225e6 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
+--
+2.20.1
+
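
The helpers themselves are not defined in this patch; they come from an earlier
patch in the series. As a rough, hypothetical approximation of their behaviour in
terms of the mainline schedule_hrtimeout() API (sketch only; the real helpers also
mirror schedule_timeout() by returning the time remaining, which this sketch does
not reproduce):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/sched.h>

    /* Sketch of schedule_msec_hrtimeout(): sleep for ms milliseconds on an
     * hrtimer instead of the coarse jiffy wheel. Like schedule_timeout(),
     * it relies on the caller having already set the task state. */
    static int sketch_msec_hrtimeout(long ms)
    {
            ktime_t expires = ms_to_ktime(ms);

            return schedule_hrtimeout(&expires, HRTIMER_MODE_REL);
    }

    /* Sketch of schedule_min_hrtimeout(): the 1 ms special case substituted
     * for every schedule_timeout(1) in the hunks above. */
    static int sketch_min_hrtimeout(void)
    {
            return sketch_msec_hrtimeout(1);
    }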