Displaying 20 results from an estimated 26 matches for "nr_vring".
2023 May 08
1
[PATCH V2 4/5] vDPA/ifcvf: synchronize irqs in the reset routine
...-368,20 +365,42 @@ void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
vp_iowrite16(ready, &cfg->queue_enable);
}
-static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
{
- u32 i;
+ u16 qid;
+
+ for (qid = 0; qid < hw->nr_vring; qid++) {
+ hw->vring[qid].cb.callback = NULL;
+ hw->vring[qid].cb.private = NULL;
+ ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
+ }
+}
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+ hw->config_cb.callback = NULL;
+ hw->config_cb.private = NULL;
ifcvf_s...
2023 May 24
2
[PATCH V2 4/5] vDPA/ifcvf: synchronize irqs in the reset routine
...bool ready)
> vp_iowrite16(ready, &cfg->queue_enable);
> }
>
> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> {
> - u32 i;
> + u16 qid;
> +
> + for (qid = 0; qid < hw->nr_vring; qid++) {
> + hw->vring[qid].cb.callback = NULL;
> + hw->vring[qid].cb.private = NULL;
> + ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
> + }
> +}
>
> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
>...
2023 Mar 31
7
[PATCH 0/5] vDPA/ifcvf: implement immediate initialization mechanism
Formerly, the ifcvf driver implemented a lazy-initialization mechanism
for the virtqueues and other config space contents:
it would store all configurations passed down from userspace,
then load them into the device config space upon DRIVER_OK.
This cannot serve live migration, so this series implements an
immediate initialization mechanism, which means rather than the
former store-load
2023 May 08
6
[PATCH V2 0/5] vDPA/ifcvf: implement immediate initialization mechanism
Formerly, the ifcvf driver implemented a lazy-initialization mechanism
for the virtqueues and other config space contents:
it would store all configurations passed down from userspace,
then load them into the device config space upon DRIVER_OK.
This cannot serve live migration, so this series implements an
immediate initialization mechanism, which means rather than the
former store-load
2023 May 08
1
[PATCH V2 1/5] vDPA/ifcvf: virt queue ops take immediate actions
...n 0;
}
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
- struct virtio_pci_common_cfg __iomem *cfg;
- u32 i;
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
- for (i = 0; i < hw->nr_vring; i++) {
- if (!hw->vring[i].ready)
- break;
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(num, &cfg->queue_size);
+}
- vp_iowrite16(i, &cfg->queue_select);
- vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
- &cfg->queue_...
2019 Oct 21
0
[RFC 1/2] vhost: IFC VF hardware operation layer
...config);
>>> +    if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
>>> +        IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
>>> +        return -1;
>>> +    }
>>> +
>>> +    for (i = 0; i < hw->nr_vring; i++) {
>>> +        iowrite16(i, &cfg->queue_select);
>>> +        io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
>>> +                &cfg->queue_desc_hi);
>>> +        io_write64_twopart(hw->vring[i].avail, &cfg->q...
2023 Mar 31
1
[PATCH 1/5] virt queue ops take immediate actions
...n 0;
}
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
- struct virtio_pci_common_cfg __iomem *cfg;
- u32 i;
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
- for (i = 0; i < hw->nr_vring; i++) {
- if (!hw->vring[i].ready)
- break;
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(num, &cfg->queue_size);
+}
- vp_iowrite16(i, &cfg->queue_select);
- vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
- &cfg->queue_...
2019 Nov 08
0
[PATCH 1/2] IFC hardware operation layer
...cfg;
> + iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
> +
> + if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
> + IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
> + return -1;
> + }
> +
> + for (i = 0; i < hw->nr_vring; i++) {
> + iowrite16(i, &cfg->queue_select);
> + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> +...
2019 Nov 05
1
[PATCH 1/2] IFC hardware operation layer
...cfg;
> + iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
> +
> + if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
> + IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
> + return -1;
> + }
> +
> + for (i = 0; i < hw->nr_vring; i++) {
> + iowrite16(i, &cfg->queue_select);
> + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> +...
2019 Nov 05
0
[PATCH 1/2] IFC hardware operation layer
...cfg;
> + iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
> +
> + if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
> + IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
> + return -1;
> + }
> +
> + for (i = 0; i < hw->nr_vring; i++) {
> + iowrite16(i, &cfg->queue_select);
> + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> +...
2019 Nov 06
0
[PATCH 1/2] IFC hardware operation layer
...cfg;
> + iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
> +
> + if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
> + IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
> + return -1;
> + }
> +
> + for (i = 0; i < hw->nr_vring; i++) {
> + iowrite16(i, &cfg->queue_select);
> + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> +...
2019 Oct 16
0
[RFC 1/2] vhost: IFC VF hardware operation layer
...cfg;
> +
> + iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
> + if (ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
> + IFC_ERR(ifcvf->dev, "No msix vector for device config.\n");
> + return -1;
> + }
> +
> + for (i = 0; i < hw->nr_vring; i++) {
> + iowrite16(i, &cfg->queue_select);
> + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> +...
2018 Apr 02
2
[RFC] vhost: introduce mdev based hardware vhost backend
...a_map;
vdpa_dma_unmap_t dma_unmap;
vdpa_set_eventfd_t set_eventfd;
vdpa_supported_features_t supported_features;
vdpa_notify_device_t notify;
vdpa_get_notify_addr_t get_notify_addr;
};
struct vdpa_dev {
struct mdev_device *mdev;
struct mutex ops_lock;
u8 vconfig[VDPA_CONFIG_SIZE];
int nr_vring;
u64 features;
u64 state;
struct vhost_memory *mem_table;
bool pending_reply;
struct vhost_vfio_op pending;
const struct vdpa_device_ops *ops;
void *private;
int max_vrings;
struct vdpa_vring_info vring_info[0];
};
struct vdpa_dev *vdpa_alloc(struct mdev_device *mdev, void *private,...
2018 Apr 02
2
[RFC] vhost: introduce mdev based hardware vhost backend
...a_map;
vdpa_dma_unmap_t dma_unmap;
vdpa_set_eventfd_t set_eventfd;
vdpa_supported_features_t supported_features;
vdpa_notify_device_t notify;
vdpa_get_notify_addr_t get_notify_addr;
};
struct vdpa_dev {
struct mdev_device *mdev;
struct mutex ops_lock;
u8 vconfig[VDPA_CONFIG_SIZE];
int nr_vring;
u64 features;
u64 state;
struct vhost_memory *mem_table;
bool pending_reply;
struct vhost_vfio_op pending;
const struct vdpa_device_ops *ops;
void *private;
int max_vrings;
struct vdpa_vring_info vring_info[0];
};
struct vdpa_dev *vdpa_alloc(struct mdev_device *mdev, void *private,...
2018 Apr 10
0
[RFC] vhost: introduce mdev based hardware vhost backend
...entfd_t set_eventfd;
> vdpa_supported_features_t supported_features;
> vdpa_notify_device_t notify;
> vdpa_get_notify_addr_t get_notify_addr;
> };
>
> struct vdpa_dev {
> struct mdev_device *mdev;
> struct mutex ops_lock;
> u8 vconfig[VDPA_CONFIG_SIZE];
> int nr_vring;
> u64 features;
> u64 state;
> struct vhost_memory *mem_table;
> bool pending_reply;
> struct vhost_vfio_op pending;
> const struct vdpa_device_ops *ops;
> void *private;
> int max_vrings;
> struct vdpa_vring_info vring_info[0];
> };
>
> struct vdpa_dev...
2020 Apr 02
0
[PATCH v2 -next] vdpa: remove unused variables 'ifcvf' and 'ifcvf_lm'
...dpa/ifcvf/ifcvf_main.c
@@ -31,11 +31,9 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
static int ifcvf_start_datapath(void *private)
{
struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
- struct ifcvf_adapter *ifcvf;
u8 status;
int ret;
- ifcvf = vf_to_adapter(vf);
vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
ret = ifcvf_start_hw(vf);
if (ret < 0) {
--
2.17.1
2023 Jan 27
0
[PATCH] vdpa: ifcvf: Do proper cleanup if IFCVF init fails
...dev *pdev, const
> struct pci_device_id *id)
BTW your patch was mangled but I fixed it.
> ret = ifcvf_init_hw(vf, pdev);
> if (ret) {
> IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
> - return ret;
> + goto err;
> }
>
> for (i = 0; i < vf->nr_vring; i++)
> --
> 2.34.1
>
2019 Jul 03
0
[RFC v2] vhost: introduce mdev based hardware vhost backend
...{
> + struct vdpa_dev *vdpa;
> + int qid = addr->index;
> + struct vdpa_vring_info *vring;
> +
> + vdpa = mdev_get_drvdata(mdev);
> + if (!vdpa)
> + return -ENODEV;
> +
> + if (qid >= vdpa->max_vrings)
> + return -EINVAL;
> +
> + if (qid >= vdpa->nr_vring)
> + vdpa->nr_vring = qid + 1;
> +
> + vring = &vdpa->vring_info[qid];
> +
> + vring->desc_user_addr = addr->desc_user_addr;
> + vring->used_user_addr = addr->used_user_addr;
> + vring->avail_user_addr = addr->avail_user_addr;
> + vring->log_g...
2019 Jul 03
4
[RFC v2] vhost: introduce mdev based hardware vhost backend
...truct mdev_device *mdev,
+ struct vhost_vring_addr *addr)
+{
+ struct vdpa_dev *vdpa;
+ int qid = addr->index;
+ struct vdpa_vring_info *vring;
+
+ vdpa = mdev_get_drvdata(mdev);
+ if (!vdpa)
+ return -ENODEV;
+
+ if (qid >= vdpa->max_vrings)
+ return -EINVAL;
+
+ if (qid >= vdpa->nr_vring)
+ vdpa->nr_vring = qid + 1;
+
+ vring = &vdpa->vring_info[qid];
+
+ vring->desc_user_addr = addr->desc_user_addr;
+ vring->used_user_addr = addr->used_user_addr;
+ vring->avail_user_addr = addr->avail_user_addr;
+ vring->log_guest_addr = addr->log_guest_addr;
+
+...
2019 Jul 03
4
[RFC v2] vhost: introduce mdev based hardware vhost backend
...truct mdev_device *mdev,
+ struct vhost_vring_addr *addr)
+{
+ struct vdpa_dev *vdpa;
+ int qid = addr->index;
+ struct vdpa_vring_info *vring;
+
+ vdpa = mdev_get_drvdata(mdev);
+ if (!vdpa)
+ return -ENODEV;
+
+ if (qid >= vdpa->max_vrings)
+ return -EINVAL;
+
+ if (qid >= vdpa->nr_vring)
+ vdpa->nr_vring = qid + 1;
+
+ vring = &vdpa->vring_info[qid];
+
+ vring->desc_user_addr = addr->desc_user_addr;
+ vring->used_user_addr = addr->used_user_addr;
+ vring->avail_user_addr = addr->avail_user_addr;
+ vring->log_guest_addr = addr->log_guest_addr;
+
+...