summaryrefslogtreecommitdiff
path: root/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
diff options
context:
space:
mode:
Diffstat (limited to 'sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch')
-rw-r--r-- sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch 1009
1 files changed, 1009 insertions, 0 deletions
diff --git a/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
new file mode 100644
index 00000000..f4b441d8
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
@@ -0,0 +1,1009 @@
+From 8a679ba5279cbff1a8e4c47b55ac4bd6d66289f8 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:28:30 +1100
+Subject: [PATCH 08/16] Replace all schedule timeout(1) with
+ schedule_min_hrtimeout()
+
+---
+ drivers/block/swim.c | 6 +-
+ drivers/bluetooth/hci_qca.c | 2 +-
+ drivers/char/ipmi/ipmi_msghandler.c | 2 +-
+ drivers/char/ipmi/ipmi_ssif.c | 2 +-
+ drivers/char/snsc.c | 4 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-streams.c | 2 +-
+ drivers/mfd/ucb1x00-core.c | 2 +-
+ drivers/misc/sgi-xp/xpc_channel.c | 2 +-
+ drivers/net/caif/caif_hsi.c | 2 +-
+ drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +-
+ drivers/net/usb/lan78xx.c | 2 +-
+ drivers/net/usb/usbnet.c | 2 +-
+ drivers/scsi/fnic/fnic_scsi.c | 4 +-
+ drivers/scsi/snic/snic_scsi.c | 2 +-
+ .../staging/comedi/drivers/ni_mio_common.c | 2 +-
+ drivers/staging/lustre/lnet/lnet/lib-eq.c | 426 ++++++++++++++++++
+ drivers/staging/rts5208/rtsx.c | 2 +-
+ drivers/staging/speakup/speakup_acntpc.c | 4 +-
+ drivers/staging/speakup/speakup_apollo.c | 2 +-
+ drivers/staging/speakup/speakup_decext.c | 2 +-
+ drivers/staging/speakup/speakup_decpc.c | 2 +-
+ drivers/staging/speakup/speakup_dectlk.c | 2 +-
+ drivers/staging/speakup/speakup_dtlk.c | 4 +-
+ drivers/staging/speakup/speakup_keypc.c | 4 +-
+ drivers/staging/speakup/synth.c | 14 +-
+ .../staging/unisys/visornic/visornic_main.c | 6 +-
+ drivers/video/fbdev/omap/hwa742.c | 2 +-
+ drivers/video/fbdev/pxafb.c | 2 +-
+ fs/btrfs/extent-tree.c | 2 +-
+ fs/btrfs/inode-map.c | 2 +-
+ sound/usb/line6/pcm.c | 2 +-
+ 34 files changed, 471 insertions(+), 51 deletions(-)
+ create mode 100644 drivers/staging/lustre/lnet/lnet/lib-eq.c
+
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index 0e31884a9519..16fcfbde31d5 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base,
+ if (swim_readbit(base, MOTOR_ON))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ } else if (action == OFF) {
+ swim_action(base, MOTOR_OFF);
+@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base)
+ if (!swim_readbit(base, DISK_IN))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ swim_select(base, RELAX);
+ }
+@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base)
+ for (wait = 0; wait < HZ; wait++) {
+
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ swim_select(base, RELAX);
+ if (!swim_readbit(base, STEP))
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 2fee65886d50..4ca0bae3df58 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -980,7 +980,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+ * then host can communicate with new baudrate to controller
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
++ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_RUNNING);
+
+ if (qcadev->btsoc_type == QCA_WCN3990)
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 7fc9612070a1..5a7f8a879001 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3453,7 +3453,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ /* No need for locks, the interface is down. */
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 29e67a80fb20..73bd0eca5fe5 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1208,7 +1208,7 @@ static void shutdown_ssif(void *send_info)
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
+index 5918ea7499bb..5228e78df804 100644
+--- a/drivers/char/snsc.c
++++ b/drivers/char/snsc.c
+@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+ add_wait_queue(&sd->sd_rq, &wait);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_rq, &wait);
+ if (signal_pending(current)) {
+@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
+ add_wait_queue(&sd->sd_wq, &wait);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_wq, &wait);
+ if (signal_pending(current)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index d0fd147ef75f..730ae4fe6b85 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index c3ad4478266b..7e2a29d56459 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -202,7 +202,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ break;
+ }
+ if (lazy)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
+index 4cdc6d2be85d..22c0803cbff3 100644
+--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
+@@ -1154,7 +1154,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+- schedule_timeout(msecs_to_jiffies(25));
++ schedule_msec_hrtimeout((25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
+diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
+index d27c6df97566..e9ffc4eeb478 100644
+--- a/drivers/media/pci/ivtv/ivtv-streams.c
++++ b/drivers/media/pci/ivtv/ivtv-streams.c
+@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ time_before(jiffies,
+ then + msecs_to_jiffies(2000))) {
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ }
+
+ /* To convert jiffies to ms, we must multiply by 1000
+diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
+index d6fb2e1a759a..7ac951b84beb 100644
+--- a/drivers/mfd/ucb1x00-core.c
++++ b/drivers/mfd/ucb1x00-core.c
+@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
+ break;
+ /* yield to other processes */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ return UCB_ADC_DAT(val);
+diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
+index 05a890ce2ab8..f6eb97bc3a2c 100644
+--- a/drivers/misc/sgi-xp/xpc_channel.c
++++ b/drivers/misc/sgi-xp/xpc_channel.c
+@@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+- ret = schedule_timeout(1);
++ ret = schedule_min_hrtimeout();
+ finish_wait(&ch->msg_allocate_wq, &wait);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index 433a14b9f731..4d197a99472b 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work)
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ retry--;
+ }
+
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+index 13238a72a338..fc51ae55c63f 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
+@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index c3c9ba44e2a1..1bc66289699f 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2681,7 +2681,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 770aa624147f..9384de186bf9 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -770,7 +770,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 8cbd3c9f0b4c..7e3f9baa4ac6 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+@@ -2255,7 +2255,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
+ }
+ }
+
+- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
+index d9b2e46424aa..4a313a0f2039 100644
+--- a/drivers/scsi/snic/snic_scsi.c
++++ b/drivers/scsi/snic/snic_scsi.c
+@@ -2354,7 +2354,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+
+ /* Wait for all the IOs that are entered in Qcmd */
+ while (atomic_read(&snic->ios_inflight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 4dee2fc37aed..2bb1c1157636 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -4650,7 +4650,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
+ if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (schedule_timeout(1))
++ if (schedule_min_hrtimeout())
+ return -EIO;
+ }
+ if (i == timeout) {
+diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+new file mode 100644
+index 000000000000..8cca151741b2
+--- /dev/null
++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+@@ -0,0 +1,426 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * GPL HEADER START
++ *
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 only,
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License version 2 for more details (a copy is included
++ * in the LICENSE file that accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License
++ * version 2 along with this program; If not, see
++ * http://www.gnu.org/licenses/gpl-2.0.html
++ *
++ * GPL HEADER END
++ */
++/*
++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
++ * Use is subject to license terms.
++ *
++ * Copyright (c) 2012, Intel Corporation.
++ */
++/*
++ * This file is part of Lustre, http://www.lustre.org/
++ * Lustre is a trademark of Sun Microsystems, Inc.
++ *
++ * lnet/lnet/lib-eq.c
++ *
++ * Library level Event queue management routines
++ */
++
++#define DEBUG_SUBSYSTEM S_LNET
++
++#include <linux/lnet/lib-lnet.h>
++
++/**
++ * Create an event queue that has room for \a count number of events.
++ *
++ * The event queue is circular and older events will be overwritten by new
++ * ones if they are not removed in time by the user using the functions
++ * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
++ * determine the appropriate size of the event queue to prevent this loss
++ * of events. Note that when EQ handler is specified in \a callback, no
++ * event loss can happen, since the handler is run for each event deposited
++ * into the EQ.
++ *
++ * \param count The number of events to be stored in the event queue. It
++ * will be rounded up to the next power of two.
++ * \param callback A handler function that runs when an event is deposited
++ * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
++ * indicate that no event handler is desired.
++ * \param handle On successful return, this location will hold a handle for
++ * the newly created EQ.
++ *
++ * \retval 0 On success.
++ * \retval -EINVAL If an parameter is not valid.
++ * \retval -ENOMEM If memory for the EQ can't be allocated.
++ *
++ * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
++ */
++int
++LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
++ struct lnet_handle_eq *handle)
++{
++ struct lnet_eq *eq;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ /*
++ * We need count to be a power of 2 so that when eq_{enq,deq}_seq
++ * overflow, they don't skip entries, so the queue has the same
++ * apparent capacity at all times
++ */
++ if (count)
++ count = roundup_pow_of_two(count);
++
++ if (callback != LNET_EQ_HANDLER_NONE && count)
++ CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
++
++ /*
++ * count can be 0 if only need callback, we can eliminate
++ * overhead of enqueue event
++ */
++ if (!count && callback == LNET_EQ_HANDLER_NONE)
++ return -EINVAL;
++
++ eq = kzalloc(sizeof(*eq), GFP_NOFS);
++ if (!eq)
++ return -ENOMEM;
++
++ if (count) {
++ eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!eq->eq_events)
++ goto failed;
++ /*
++ * NB allocator has set all event sequence numbers to 0,
++ * so all them should be earlier than eq_deq_seq
++ */
++ }
++
++ eq->eq_deq_seq = 1;
++ eq->eq_enq_seq = 1;
++ eq->eq_size = count;
++ eq->eq_callback = callback;
++
++ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
++ sizeof(*eq->eq_refs[0]));
++ if (!eq->eq_refs)
++ goto failed;
++
++ /* MUST hold both exclusive lnet_res_lock */
++ lnet_res_lock(LNET_LOCK_EX);
++ /*
++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
++ * both EQ lookup and poll event with only lnet_eq_wait_lock
++ */
++ lnet_eq_wait_lock();
++
++ lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
++ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
++
++ lnet_eq_wait_unlock();
++ lnet_res_unlock(LNET_LOCK_EX);
++
++ lnet_eq2handle(handle, eq);
++ return 0;
++
++failed:
++ kvfree(eq->eq_events);
++
++ if (eq->eq_refs)
++ cfs_percpt_free(eq->eq_refs);
++
++ kfree(eq);
++ return -ENOMEM;
++}
++EXPORT_SYMBOL(LNetEQAlloc);
++
++/**
++ * Release the resources associated with an event queue if it's idle;
++ * otherwise do nothing and it's up to the user to try again.
++ *
++ * \param eqh A handle for the event queue to be released.
++ *
++ * \retval 0 If the EQ is not in use and freed.
++ * \retval -ENOENT If \a eqh does not point to a valid EQ.
++ * \retval -EBUSY If the EQ is still in use by some MDs.
++ */
++int
++LNetEQFree(struct lnet_handle_eq eqh)
++{
++ struct lnet_eq *eq;
++ struct lnet_event *events = NULL;
++ int **refs = NULL;
++ int *ref;
++ int rc = 0;
++ int size = 0;
++ int i;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ lnet_res_lock(LNET_LOCK_EX);
++ /*
++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
++ * both EQ lookup and poll event with only lnet_eq_wait_lock
++ */
++ lnet_eq_wait_lock();
++
++ eq = lnet_handle2eq(&eqh);
++ if (!eq) {
++ rc = -ENOENT;
++ goto out;
++ }
++
++ cfs_percpt_for_each(ref, i, eq->eq_refs) {
++ LASSERT(*ref >= 0);
++ if (!*ref)
++ continue;
++
++ CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
++ i, *ref);
++ rc = -EBUSY;
++ goto out;
++ }
++
++ /* stash for free after lock dropped */
++ events = eq->eq_events;
++ size = eq->eq_size;
++ refs = eq->eq_refs;
++
++ lnet_res_lh_invalidate(&eq->eq_lh);
++ list_del(&eq->eq_list);
++ kfree(eq);
++ out:
++ lnet_eq_wait_unlock();
++ lnet_res_unlock(LNET_LOCK_EX);
++
++ kvfree(events);
++ if (refs)
++ cfs_percpt_free(refs);
++
++ return rc;
++}
++EXPORT_SYMBOL(LNetEQFree);
++
++void
++lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
++{
++ /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
++ int index;
++
++ if (!eq->eq_size) {
++ LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
++ eq->eq_callback(ev);
++ return;
++ }
++
++ lnet_eq_wait_lock();
++ ev->sequence = eq->eq_enq_seq++;
++
++ LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
++ index = ev->sequence & (eq->eq_size - 1);
++
++ eq->eq_events[index] = *ev;
++
++ if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
++ eq->eq_callback(ev);
++
++ /* Wake anyone waiting in LNetEQPoll() */
++ if (waitqueue_active(&the_lnet.ln_eq_waitq))
++ wake_up_all(&the_lnet.ln_eq_waitq);
++ lnet_eq_wait_unlock();
++}
++
++static int
++lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
++{
++ int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
++ struct lnet_event *new_event = &eq->eq_events[new_index];
++ int rc;
++
++ /* must called with lnet_eq_wait_lock hold */
++ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
++ return 0;
++
++ /* We've got a new event... */
++ *ev = *new_event;
++
++ CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
++ new_event, eq->eq_deq_seq, eq->eq_size);
++
++ /* ...but did it overwrite an event we've not seen yet? */
++ if (eq->eq_deq_seq == new_event->sequence) {
++ rc = 1;
++ } else {
++ /*
++ * don't complain with CERROR: some EQs are sized small
++ * anyway; if it's important, the caller should complain
++ */
++ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
++ eq->eq_deq_seq, new_event->sequence);
++ rc = -EOVERFLOW;
++ }
++
++ eq->eq_deq_seq = new_event->sequence + 1;
++ return rc;
++}
++
++/**
++ * A nonblocking function that can be used to get the next event in an EQ.
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully. The event is removed from the queue.
++ *
++ * \param eventq A handle for the event queue.
++ * \param event On successful return (1 or -EOVERFLOW), this location will
++ * hold the next event in the EQ.
++ *
++ * \retval 0 No pending event in the EQ.
++ * \retval 1 Indicates success.
++ * \retval -ENOENT If \a eventq does not point to a valid EQ.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ has been dropped due to limited space in the EQ.
++ */
++
++/**
++ * Block the calling process until there is an event in the EQ.
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully. This function returns the next event
++ * in the EQ and removes it from the EQ.
++ *
++ * \param eventq A handle for the event queue.
++ * \param event On successful return (1 or -EOVERFLOW), this location will
++ * hold the next event in the EQ.
++ *
++ * \retval 1 Indicates success.
++ * \retval -ENOENT If \a eventq does not point to a valid EQ.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ has been dropped due to limited space in the EQ.
++ */
++
++static int
++lnet_eq_wait_locked(int *timeout_ms, long state)
++__must_hold(&the_lnet.ln_eq_wait_lock)
++{
++ int tms = *timeout_ms;
++ int wait;
++ wait_queue_entry_t wl;
++ unsigned long now;
++
++ if (!tms)
++ return -ENXIO; /* don't want to wait and no new event */
++
++ init_waitqueue_entry(&wl, current);
++ set_current_state(state);
++ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
++
++ lnet_eq_wait_unlock();
++
++ if (tms < 0) {
++ schedule();
++ } else {
++ now = jiffies;
++ schedule_msec_hrtimeout((tms));
++ tms -= jiffies_to_msecs(jiffies - now);
++ if (tms < 0) /* no more wait but may have new event */
++ tms = 0;
++ }
++
++ wait = tms; /* might need to call here again */
++ *timeout_ms = tms;
++
++ lnet_eq_wait_lock();
++ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
++
++ return wait;
++}
++
++/**
++ * Block the calling process until there's an event from a set of EQs or
++ * timeout happens.
++ *
++ * If an event handler is associated with the EQ, the handler will run before
++ * this function returns successfully, in which case the corresponding event
++ * is consumed.
++ *
++ * LNetEQPoll() provides a timeout to allow applications to poll, block for a
++ * fixed period, or block indefinitely.
++ *
++ * \param eventqs,neq An array of EQ handles, and size of the array.
++ * \param timeout_ms Time in milliseconds to wait for an event to occur on
++ * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
++ * infinite timeout.
++ * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD
++ * \param event,which On successful return (1 or -EOVERFLOW), \a event will
++ * hold the next event in the EQs, and \a which will contain the index of the
++ * EQ from which the event was taken.
++ *
++ * \retval 0 No pending event in the EQs after timeout.
++ * \retval 1 Indicates success.
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
++ * at least one event between this event and the last event obtained from the
++ * EQ indicated by \a which has been dropped due to limited space in the EQ.
++ * \retval -ENOENT If there's an invalid handle in \a eventqs.
++ */
++int
++LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
++ int interruptible,
++ struct lnet_event *event, int *which)
++{
++ int wait = 1;
++ int rc;
++ int i;
++
++ LASSERT(the_lnet.ln_refcount > 0);
++
++ if (neq < 1)
++ return -ENOENT;
++
++ lnet_eq_wait_lock();
++
++ for (;;) {
++ for (i = 0; i < neq; i++) {
++ struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]);
++
++ if (!eq) {
++ lnet_eq_wait_unlock();
++ return -ENOENT;
++ }
++
++ rc = lnet_eq_dequeue_event(eq, event);
++ if (rc) {
++ lnet_eq_wait_unlock();
++ *which = i;
++ return rc;
++ }
++ }
++
++ if (!wait)
++ break;
++
++ /*
++ * return value of lnet_eq_wait_locked:
++ * -1 : did nothing and it's sure no new event
++ * 1 : sleep inside and wait until new event
++ * 0 : don't want to wait anymore, but might have new event
++ * so need to call dequeue again
++ */
++ wait = lnet_eq_wait_locked(&timeout_ms,
++ interruptible ? TASK_INTERRUPTIBLE
++ : TASK_NOLOAD);
++ if (wait < 0) /* no new event */
++ break;
++ }
++
++ lnet_eq_wait_unlock();
++ return 0;
++}
+diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
+index 69e6abe14abf..7d23e214ac21 100644
+--- a/drivers/staging/rts5208/rtsx.c
++++ b/drivers/staging/rts5208/rtsx.c
+@@ -507,7 +507,7 @@ static int rtsx_polling_thread(void *__dev)
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
++ schedule_msec_hrtimeout((POLLING_INTERVAL));
+
+ /* lock the device pointers */
+ mutex_lock(&dev->dev_mutex);
+diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
+index 28519754b2f0..a96805bbec5c 100644
+--- a/drivers/staging/speakup/speakup_acntpc.c
++++ b/drivers/staging/speakup/speakup_acntpc.c
+@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
+index 0877b4044c28..627102d048c1 100644
+--- a/drivers/staging/speakup/speakup_apollo.c
++++ b/drivers/staging/speakup/speakup_apollo.c
+@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
+index 3741c0fcf5bb..bff857b4aa5f 100644
+--- a/drivers/staging/speakup/speakup_decext.c
++++ b/drivers/staging/speakup/speakup_decext.c
+@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
+index 6649309e0342..c60e4712d817 100644
+--- a/drivers/staging/speakup/speakup_decpc.c
++++ b/drivers/staging/speakup/speakup_decpc.c
+@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (dt_sendchar(ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
+index a144f28ee1a8..c34764fafe2b 100644
+--- a/drivers/staging/speakup/speakup_dectlk.c
++++ b/drivers/staging/speakup/speakup_dectlk.c
+@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth)
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
+index dbebed0eeeec..6d83c13ca4a6 100644
+--- a/drivers/staging/speakup/speakup_dtlk.c
++++ b/drivers/staging/speakup/speakup_dtlk.c
+@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
+ delay_time_val = delay_time->u.n.value;
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
+index 3901734982a4..4e8a7a98b46d 100644
+--- a/drivers/staging/speakup/speakup_keypc.c
++++ b/drivers/staging/speakup/speakup_keypc.c
+@@ -199,7 +199,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -232,7 +232,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies+jiffy_delta_val;
+ }
+ }
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index 25f259ee4ffc..b9721103e651 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (ch == '\n')
+ ch = synth->procspeech;
+- if (unicode)
+- ret = synth->io_ops->synth_out_unicode(synth, ch);
+- else
+- ret = synth->io_ops->synth_out(synth, ch);
+- if (!ret) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ if (!synth->io_ops->synth_out(synth, ch)) {
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+@@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth->io_ops->synth_out(synth, synth->procspeech))
+- schedule_timeout(
+- msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ else
+- schedule_timeout(
+- msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ set_current_state(TASK_RUNNING);
+diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+index 3647b8f1ed28..9fb26ccc2b3b 100644
+--- a/drivers/staging/unisys/visornic/visornic_main.c
++++ b/drivers/staging/unisys/visornic/visornic_main.c
+@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (atomic_read(&devdata->usage))
+ break;
+@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
+index 6199d4806193..7c7165f2dad4 100644
+--- a/drivers/video/fbdev/omap/hwa742.c
++++ b/drivers/video/fbdev/omap/hwa742.c
+@@ -926,7 +926,7 @@ static void hwa742_resume(void)
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(5));
++ schedule_msec_hrtimeout((5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+ }
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index bbed039617a4..681ae041ea77 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
+ mutex_unlock(&fbi->ctrlr_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(30));
++ schedule_msec_hrtimeout((30));
+ }
+
+ pr_debug("%s(): task ending\n", __func__);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 2d9074295d7f..7df3e60e4e89 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5905,7 +5905,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+
+ if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ if (delalloc_lock)
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index ffca2abf13d0..89b2a7f7397e 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -75,7 +75,7 @@ static int caching_kthread(void *data)
+ btrfs_release_path(path);
+ root->ino_cache_progress = last;
+ up_read(&fs_info->commit_root_sem);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ goto again;
+ } else
+ continue;
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index 72c6f8e82a7e..46d8c2a148ad 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
+--
+2.17.1
+