15983 lines
464 KiB
C
15983 lines
464 KiB
C
/*******************************************************************
|
||
* This file is part of the Emulex Linux Device Driver for *
|
||
* Fibre Channel Host Bus Adapters. *
|
||
* Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
|
||
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
|
||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||
* EMULEX and SLI are trademarks of Emulex. *
|
||
* www.broadcom.com *
|
||
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
|
||
* *
|
||
* This program is free software; you can redistribute it and/or *
|
||
* modify it under the terms of version 2 of the GNU General *
|
||
* Public License as published by the Free Software Foundation. *
|
||
* This program is distributed in the hope that it will be useful. *
|
||
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
|
||
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
|
||
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
|
||
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
|
||
* TO BE LEGALLY INVALID. See the GNU General Public License for *
|
||
* more details, a copy of which can be found in the file COPYING *
|
||
* included with this package. *
|
||
*******************************************************************/
|
||
|
||
#include <linux/blkdev.h>
|
||
#include <linux/delay.h>
|
||
#include <linux/dma-mapping.h>
|
||
#include <linux/idr.h>
|
||
#include <linux/interrupt.h>
|
||
#include <linux/module.h>
|
||
#include <linux/kthread.h>
|
||
#include <linux/pci.h>
|
||
#include <linux/spinlock.h>
|
||
#include <linux/sched/clock.h>
|
||
#include <linux/ctype.h>
|
||
#include <linux/aer.h>
|
||
#include <linux/slab.h>
|
||
#include <linux/firmware.h>
|
||
#include <linux/miscdevice.h>
|
||
#include <linux/percpu.h>
|
||
#include <linux/irq.h>
|
||
#include <linux/bitops.h>
|
||
#include <linux/crash_dump.h>
|
||
#include <linux/cpu.h>
|
||
#include <linux/cpuhotplug.h>
|
||
|
||
#include <scsi/scsi.h>
|
||
#include <scsi/scsi_device.h>
|
||
#include <scsi/scsi_host.h>
|
||
#include <scsi/scsi_transport_fc.h>
|
||
#include <scsi/scsi_tcq.h>
|
||
#include <scsi/fc/fc_fs.h>
|
||
|
||
#include "lpfc_hw4.h"
|
||
#include "lpfc_hw.h"
|
||
#include "lpfc_sli.h"
|
||
#include "lpfc_sli4.h"
|
||
#include "lpfc_nl.h"
|
||
#include "lpfc_disc.h"
|
||
#include "lpfc.h"
|
||
#include "lpfc_scsi.h"
|
||
#include "lpfc_nvme.h"
|
||
#include "lpfc_logmsg.h"
|
||
#include "lpfc_crtn.h"
|
||
#include "lpfc_vport.h"
|
||
#include "lpfc_version.h"
|
||
#include "lpfc_ids.h"
|
||
|
||
static enum cpuhp_state lpfc_cpuhp_state;
|
||
/* Used when mapping IRQ vectors in a driver centric manner */
|
||
static uint32_t lpfc_present_cpu;
|
||
static bool lpfc_pldv_detect;
|
||
|
||
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
|
||
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
|
||
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
|
||
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
|
||
static int lpfc_post_rcv_buf(struct lpfc_hba *);
|
||
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
|
||
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
|
||
static int lpfc_setup_endian_order(struct lpfc_hba *);
|
||
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
|
||
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
|
||
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
|
||
static void lpfc_init_sgl_list(struct lpfc_hba *);
|
||
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
|
||
static void lpfc_free_active_sgl(struct lpfc_hba *);
|
||
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
|
||
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
|
||
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
|
||
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
|
||
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
|
||
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
|
||
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
|
||
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
|
||
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
|
||
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
|
||
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
|
||
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
|
||
|
||
static struct scsi_transport_template *lpfc_transport_template = NULL;
|
||
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
|
||
static DEFINE_IDR(lpfc_hba_index);
|
||
#define LPFC_NVMET_BUF_POST 254
|
||
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
|
||
|
||
/**
|
||
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine will do LPFC initialization prior to issuing the CONFIG_PORT
|
||
* mailbox command. It retrieves the revision information from the HBA and
|
||
* collects the Vital Product Data (VPD) about the HBA for preparing the
|
||
* configuration of the HBA.
|
||
*
|
||
* Return codes:
|
||
* 0 - success.
|
||
* -ERESTART - requests the SLI layer to reset the HBA and try again.
|
||
* Any other value - indicates an error.
|
||
**/
|
||
int
|
||
lpfc_config_port_prep(struct lpfc_hba *phba)
|
||
{
|
||
lpfc_vpd_t *vp = &phba->vpd;
|
||
int i = 0, rc;
|
||
LPFC_MBOXQ_t *pmb;
|
||
MAILBOX_t *mb;
|
||
char *lpfc_vpd_data = NULL;
|
||
uint16_t offset = 0;
|
||
static char licensed[56] =
|
||
"key unlock for use with gnu public licensed code only\0";
|
||
static int init_key = 1;
|
||
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
|
||
mb = &pmb->u.mb;
|
||
phba->link_state = LPFC_INIT_MBX_CMDS;
|
||
|
||
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
|
||
if (init_key) {
|
||
uint32_t *ptext = (uint32_t *) licensed;
|
||
|
||
for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
|
||
*ptext = cpu_to_be32(*ptext);
|
||
init_key = 0;
|
||
}
|
||
|
||
lpfc_read_nv(phba, pmb);
|
||
memset((char*)mb->un.varRDnvp.rsvd3, 0,
|
||
sizeof (mb->un.varRDnvp.rsvd3));
|
||
memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
|
||
sizeof (licensed));
|
||
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||
|
||
if (rc != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0324 Config Port initialization "
|
||
"error, mbxCmd x%x READ_NVPARM, "
|
||
"mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -ERESTART;
|
||
}
|
||
memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
|
||
sizeof(phba->wwnn));
|
||
memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
|
||
sizeof(phba->wwpn));
|
||
}
|
||
|
||
/*
|
||
* Clear all option bits except LPFC_SLI3_BG_ENABLED,
|
||
* which was already set in lpfc_get_cfgparam()
|
||
*/
|
||
phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
|
||
|
||
/* Setup and issue mailbox READ REV command */
|
||
lpfc_read_rev(phba, pmb);
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||
if (rc != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0439 Adapter failed to init, mbxCmd x%x "
|
||
"READ_REV, mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
mempool_free( pmb, phba->mbox_mem_pool);
|
||
return -ERESTART;
|
||
}
|
||
|
||
|
||
/*
|
||
* The value of rr must be 1 since the driver set the cv field to 1.
|
||
* This setting requires the FW to set all revision fields.
|
||
*/
|
||
if (mb->un.varRdRev.rr == 0) {
|
||
vp->rev.rBit = 0;
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0440 Adapter failed to init, READ_REV has "
|
||
"missing revision information.\n");
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -ERESTART;
|
||
}
|
||
|
||
if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EINVAL;
|
||
}
|
||
|
||
/* Save information as VPD data */
|
||
vp->rev.rBit = 1;
|
||
memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
|
||
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
|
||
memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
|
||
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
|
||
memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
|
||
vp->rev.biuRev = mb->un.varRdRev.biuRev;
|
||
vp->rev.smRev = mb->un.varRdRev.smRev;
|
||
vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
|
||
vp->rev.endecRev = mb->un.varRdRev.endecRev;
|
||
vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
|
||
vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
|
||
vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
|
||
vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
|
||
vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
|
||
vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
|
||
|
||
/* If the sli feature level is less then 9, we must
|
||
* tear down all RPIs and VPIs on link down if NPIV
|
||
* is enabled.
|
||
*/
|
||
if (vp->rev.feaLevelHigh < 9)
|
||
phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
|
||
|
||
if (lpfc_is_LC_HBA(phba->pcidev->device))
|
||
memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
|
||
sizeof (phba->RandomData));
|
||
|
||
/* Get adapter VPD information */
|
||
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
|
||
if (!lpfc_vpd_data)
|
||
goto out_free_mbox;
|
||
do {
|
||
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||
|
||
if (rc != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||
"0441 VPD not present on adapter, "
|
||
"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
mb->un.varDmp.word_cnt = 0;
|
||
}
|
||
/* dump mem may return a zero when finished or we got a
|
||
* mailbox error, either way we are done.
|
||
*/
|
||
if (mb->un.varDmp.word_cnt == 0)
|
||
break;
|
||
|
||
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
|
||
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
|
||
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
|
||
lpfc_vpd_data + offset,
|
||
mb->un.varDmp.word_cnt);
|
||
offset += mb->un.varDmp.word_cnt;
|
||
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
|
||
|
||
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
|
||
|
||
kfree(lpfc_vpd_data);
|
||
out_free_mbox:
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return 0;
|
||
}
|
||
|
||
/**
|
||
* lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @pmboxq: pointer to the driver internal queue element for mailbox command.
|
||
*
|
||
* This is the completion handler for driver's configuring asynchronous event
|
||
* mailbox command to the device. If the mailbox command returns successfully,
|
||
* it will set internal async event support flag to 1; otherwise, it will
|
||
* set internal async event support flag to 0.
|
||
**/
|
||
static void
|
||
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
|
||
{
|
||
if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
|
||
phba->temp_sensor_support = 1;
|
||
else
|
||
phba->temp_sensor_support = 0;
|
||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||
return;
|
||
}
|
||
|
||
/**
|
||
* lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @pmboxq: pointer to the driver internal queue element for mailbox command.
|
||
*
|
||
* This is the completion handler for dump mailbox command for getting
|
||
* wake up parameters. When this command complete, the response contain
|
||
* Option rom version of the HBA. This function translate the version number
|
||
* into a human readable string and store it in OptionROMVersion.
|
||
**/
|
||
static void
|
||
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
|
||
{
|
||
struct prog_id *prg;
|
||
uint32_t prog_id_word;
|
||
char dist = ' ';
|
||
/* character array used for decoding dist type. */
|
||
char dist_char[] = "nabx";
|
||
|
||
if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
|
||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||
return;
|
||
}
|
||
|
||
prg = (struct prog_id *) &prog_id_word;
|
||
|
||
/* word 7 contain option rom version */
|
||
prog_id_word = pmboxq->u.mb.un.varWords[7];
|
||
|
||
/* Decode the Option rom version word to a readable string */
|
||
dist = dist_char[prg->dist];
|
||
|
||
if ((prg->dist == 3) && (prg->num == 0))
|
||
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
|
||
prg->ver, prg->rev, prg->lev);
|
||
else
|
||
snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
|
||
prg->ver, prg->rev, prg->lev,
|
||
dist, prg->num);
|
||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||
return;
|
||
}
|
||
|
||
/**
|
||
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
|
||
* @vport: pointer to lpfc vport data structure.
|
||
*
|
||
*
|
||
* Return codes
|
||
* None.
|
||
**/
|
||
void
|
||
lpfc_update_vport_wwn(struct lpfc_vport *vport)
|
||
{
|
||
struct lpfc_hba *phba = vport->phba;
|
||
|
||
/*
|
||
* If the name is empty or there exists a soft name
|
||
* then copy the service params name, otherwise use the fc name
|
||
*/
|
||
if (vport->fc_nodename.u.wwn[0] == 0)
|
||
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
|
||
sizeof(struct lpfc_name));
|
||
else
|
||
memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
|
||
sizeof(struct lpfc_name));
|
||
|
||
/*
|
||
* If the port name has changed, then set the Param changes flag
|
||
* to unreg the login
|
||
*/
|
||
if (vport->fc_portname.u.wwn[0] != 0 &&
|
||
memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
|
||
sizeof(struct lpfc_name))) {
|
||
vport->vport_flag |= FAWWPN_PARAM_CHG;
|
||
|
||
if (phba->sli_rev == LPFC_SLI_REV4 &&
|
||
vport->port_type == LPFC_PHYSICAL_PORT &&
|
||
phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
|
||
if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
|
||
phba->sli4_hba.fawwpn_flag &=
|
||
~LPFC_FAWWPN_FABRIC;
|
||
lpfc_printf_log(phba, KERN_INFO,
|
||
LOG_SLI | LOG_DISCOVERY | LOG_ELS,
|
||
"2701 FA-PWWN change WWPN from %llx to "
|
||
"%llx: vflag x%x fawwpn_flag x%x\n",
|
||
wwn_to_u64(vport->fc_portname.u.wwn),
|
||
wwn_to_u64
|
||
(vport->fc_sparam.portName.u.wwn),
|
||
vport->vport_flag,
|
||
phba->sli4_hba.fawwpn_flag);
|
||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||
sizeof(struct lpfc_name));
|
||
}
|
||
}
|
||
|
||
if (vport->fc_portname.u.wwn[0] == 0)
|
||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||
sizeof(struct lpfc_name));
|
||
else
|
||
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
|
||
sizeof(struct lpfc_name));
|
||
}
|
||
|
||
/**
|
||
* lpfc_config_port_post - Perform lpfc initialization after config port
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine will do LPFC initialization after the CONFIG_PORT mailbox
|
||
* command call. It performs all internal resource and state setups on the
|
||
* port: post IOCB buffers, enable appropriate host interrupt attentions,
|
||
* ELS ring timers, etc.
|
||
*
|
||
* Return codes
|
||
* 0 - success.
|
||
* Any other value - error.
|
||
**/
|
||
int
|
||
lpfc_config_port_post(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_vport *vport = phba->pport;
|
||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||
LPFC_MBOXQ_t *pmb;
|
||
MAILBOX_t *mb;
|
||
struct lpfc_dmabuf *mp;
|
||
struct lpfc_sli *psli = &phba->sli;
|
||
uint32_t status, timeout;
|
||
int i, j;
|
||
int rc;
|
||
|
||
spin_lock_irq(&phba->hbalock);
|
||
/*
|
||
* If the Config port completed correctly the HBA is not
|
||
* over heated any more.
|
||
*/
|
||
if (phba->over_temp_state == HBA_OVER_TEMP)
|
||
phba->over_temp_state = HBA_NORMAL_TEMP;
|
||
spin_unlock_irq(&phba->hbalock);
|
||
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
mb = &pmb->u.mb;
|
||
|
||
/* Get login parameters for NID. */
|
||
rc = lpfc_read_sparam(phba, pmb, 0);
|
||
if (rc) {
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -ENOMEM;
|
||
}
|
||
|
||
pmb->vport = vport;
|
||
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0448 Adapter failed init, mbxCmd x%x "
|
||
"READ_SPARM mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
|
||
return -EIO;
|
||
}
|
||
|
||
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
|
||
|
||
/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
|
||
* longer needed. Prevent unintended ctx_buf access as the mbox is
|
||
* reused.
|
||
*/
|
||
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
|
||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||
kfree(mp);
|
||
pmb->ctx_buf = NULL;
|
||
lpfc_update_vport_wwn(vport);
|
||
|
||
/* Update the fc_host data structures with new wwn. */
|
||
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
|
||
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
|
||
fc_host_max_npiv_vports(shost) = phba->max_vpi;
|
||
|
||
/* If no serial number in VPD data, use low 6 bytes of WWNN */
|
||
/* This should be consolidated into parse_vpd ? - mr */
|
||
if (phba->SerialNumber[0] == 0) {
|
||
uint8_t *outptr;
|
||
|
||
outptr = &vport->fc_nodename.u.s.IEEE[0];
|
||
for (i = 0; i < 12; i++) {
|
||
status = *outptr++;
|
||
j = ((status & 0xf0) >> 4);
|
||
if (j <= 9)
|
||
phba->SerialNumber[i] =
|
||
(char)((uint8_t) 0x30 + (uint8_t) j);
|
||
else
|
||
phba->SerialNumber[i] =
|
||
(char)((uint8_t) 0x61 + (uint8_t) (j - 10));
|
||
i++;
|
||
j = (status & 0xf);
|
||
if (j <= 9)
|
||
phba->SerialNumber[i] =
|
||
(char)((uint8_t) 0x30 + (uint8_t) j);
|
||
else
|
||
phba->SerialNumber[i] =
|
||
(char)((uint8_t) 0x61 + (uint8_t) (j - 10));
|
||
}
|
||
}
|
||
|
||
lpfc_read_config(phba, pmb);
|
||
pmb->vport = vport;
|
||
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0453 Adapter failed to init, mbxCmd x%x "
|
||
"READ_CONFIG, mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
mempool_free( pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
|
||
/* Check if the port is disabled */
|
||
lpfc_sli_read_link_ste(phba);
|
||
|
||
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
|
||
if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
|
||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||
"3359 HBA queue depth changed from %d to %d\n",
|
||
phba->cfg_hba_queue_depth,
|
||
mb->un.varRdConfig.max_xri);
|
||
phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
|
||
}
|
||
|
||
phba->lmt = mb->un.varRdConfig.lmt;
|
||
|
||
/* Get the default values for Model Name and Description */
|
||
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
|
||
|
||
phba->link_state = LPFC_LINK_DOWN;
|
||
|
||
/* Only process IOCBs on ELS ring till hba_state is READY */
|
||
if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
|
||
psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
|
||
if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
|
||
psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
|
||
|
||
/* Post receive buffers for desired rings */
|
||
if (phba->sli_rev != 3)
|
||
lpfc_post_rcv_buf(phba);
|
||
|
||
/*
|
||
* Configure HBA MSI-X attention conditions to messages if MSI-X mode
|
||
*/
|
||
if (phba->intr_type == MSIX) {
|
||
rc = lpfc_config_msi(phba, pmb);
|
||
if (rc) {
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||
if (rc != MBX_SUCCESS) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0352 Config MSI mailbox command "
|
||
"failed, mbxCmd x%x, mbxStatus x%x\n",
|
||
pmb->u.mb.mbxCommand,
|
||
pmb->u.mb.mbxStatus);
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
}
|
||
|
||
spin_lock_irq(&phba->hbalock);
|
||
/* Initialize ERATT handling flag */
|
||
phba->hba_flag &= ~HBA_ERATT_HANDLED;
|
||
|
||
/* Enable appropriate host interrupts */
|
||
if (lpfc_readl(phba->HCregaddr, &status)) {
|
||
spin_unlock_irq(&phba->hbalock);
|
||
return -EIO;
|
||
}
|
||
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
|
||
if (psli->num_rings > 0)
|
||
status |= HC_R0INT_ENA;
|
||
if (psli->num_rings > 1)
|
||
status |= HC_R1INT_ENA;
|
||
if (psli->num_rings > 2)
|
||
status |= HC_R2INT_ENA;
|
||
if (psli->num_rings > 3)
|
||
status |= HC_R3INT_ENA;
|
||
|
||
if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
|
||
(phba->cfg_poll & DISABLE_FCP_RING_INT))
|
||
status &= ~(HC_R0INT_ENA);
|
||
|
||
writel(status, phba->HCregaddr);
|
||
readl(phba->HCregaddr); /* flush */
|
||
spin_unlock_irq(&phba->hbalock);
|
||
|
||
/* Set up ring-0 (ELS) timer */
|
||
timeout = phba->fc_ratov * 2;
|
||
mod_timer(&vport->els_tmofunc,
|
||
jiffies + msecs_to_jiffies(1000 * timeout));
|
||
/* Set up heart beat (HB) timer */
|
||
mod_timer(&phba->hb_tmofunc,
|
||
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
|
||
phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
|
||
phba->last_completion_time = jiffies;
|
||
/* Set up error attention (ERATT) polling timer */
|
||
mod_timer(&phba->eratt_poll,
|
||
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
|
||
|
||
if (phba->hba_flag & LINK_DISABLED) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"2598 Adapter Link is disabled.\n");
|
||
lpfc_down_link(phba, pmb);
|
||
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
|
||
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"2599 Adapter failed to issue DOWN_LINK"
|
||
" mbox command rc 0x%x\n", rc);
|
||
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
|
||
if (rc)
|
||
return rc;
|
||
}
|
||
/* MBOX buffer will be freed in mbox compl */
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
|
||
lpfc_config_async(phba, pmb, LPFC_ELS_RING);
|
||
pmb->mbox_cmpl = lpfc_config_async_cmpl;
|
||
pmb->vport = phba->pport;
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
|
||
|
||
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0456 Adapter failed to issue "
|
||
"ASYNCEVT_ENABLE mbox status x%x\n",
|
||
rc);
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
}
|
||
|
||
/* Get Option rom version */
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
|
||
lpfc_dump_wakeup_param(phba, pmb);
|
||
pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
|
||
pmb->vport = phba->pport;
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
|
||
|
||
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0435 Adapter failed "
|
||
"to get Option ROM version status x%x\n", rc);
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
}
|
||
|
||
return 0;
|
||
}
|
||
|
||
/**
|
||
* lpfc_sli4_refresh_params - update driver copy of params.
|
||
* @phba: Pointer to HBA context object.
|
||
*
|
||
* This is called to refresh driver copy of dynamic fields from the
|
||
* common_get_sli4_parameters descriptor.
|
||
**/
|
||
int
|
||
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
|
||
{
|
||
LPFC_MBOXQ_t *mboxq;
|
||
struct lpfc_mqe *mqe;
|
||
struct lpfc_sli4_parameters *mbx_sli4_parameters;
|
||
int length, rc;
|
||
|
||
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!mboxq)
|
||
return -ENOMEM;
|
||
|
||
mqe = &mboxq->u.mqe;
|
||
/* Read the port's SLI4 Config Parameters */
|
||
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
|
||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||
LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
|
||
length, LPFC_SLI4_MBX_EMBED);
|
||
|
||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||
if (unlikely(rc)) {
|
||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||
return rc;
|
||
}
|
||
mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
|
||
phba->sli4_hba.pc_sli4_params.mi_cap =
|
||
bf_get(cfg_mi_ver, mbx_sli4_parameters);
|
||
|
||
/* Are we forcing MI off via module parameter? */
|
||
if (phba->cfg_enable_mi)
|
||
phba->sli4_hba.pc_sli4_params.mi_ver =
|
||
bf_get(cfg_mi_ver, mbx_sli4_parameters);
|
||
else
|
||
phba->sli4_hba.pc_sli4_params.mi_ver = 0;
|
||
|
||
phba->sli4_hba.pc_sli4_params.cmf =
|
||
bf_get(cfg_cmf, mbx_sli4_parameters);
|
||
phba->sli4_hba.pc_sli4_params.pls =
|
||
bf_get(cfg_pvl, mbx_sli4_parameters);
|
||
|
||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||
return rc;
|
||
}
|
||
|
||
/**
|
||
* lpfc_hba_init_link - Initialize the FC link
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
|
||
*
|
||
* This routine will issue the INIT_LINK mailbox command call.
|
||
* It is available to other drivers through the lpfc_hba data
|
||
* structure for use as a delayed link up mechanism with the
|
||
* module parameter lpfc_suppress_link_up.
|
||
*
|
||
* Return code
|
||
* 0 - success
|
||
* Any other value - error
|
||
**/
|
||
static int
|
||
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
|
||
{
|
||
return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
|
||
}
|
||
|
||
/**
|
||
* lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @fc_topology: desired fc topology.
|
||
* @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
|
||
*
|
||
* This routine will issue the INIT_LINK mailbox command call.
|
||
* It is available to other drivers through the lpfc_hba data
|
||
* structure for use as a delayed link up mechanism with the
|
||
* module parameter lpfc_suppress_link_up.
|
||
*
|
||
* Return code
|
||
* 0 - success
|
||
* Any other value - error
|
||
**/
|
||
int
|
||
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
|
||
uint32_t flag)
|
||
{
|
||
struct lpfc_vport *vport = phba->pport;
|
||
LPFC_MBOXQ_t *pmb;
|
||
MAILBOX_t *mb;
|
||
int rc;
|
||
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
mb = &pmb->u.mb;
|
||
pmb->vport = vport;
|
||
|
||
if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
|
||
!(phba->lmt & LMT_1Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
|
||
!(phba->lmt & LMT_2Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
|
||
!(phba->lmt & LMT_4Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
|
||
!(phba->lmt & LMT_8Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
|
||
!(phba->lmt & LMT_10Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
|
||
!(phba->lmt & LMT_16Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
|
||
!(phba->lmt & LMT_32Gb)) ||
|
||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
|
||
!(phba->lmt & LMT_64Gb))) {
|
||
/* Reset link speed to auto */
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"1302 Invalid speed for this board:%d "
|
||
"Reset link speed to auto.\n",
|
||
phba->cfg_link_speed);
|
||
phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
|
||
}
|
||
lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
|
||
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||
lpfc_set_loopback_flag(phba);
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
|
||
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0498 Adapter failed to init, mbxCmd x%x "
|
||
"INIT_LINK, mbxStatus x%x\n",
|
||
mb->mbxCommand, mb->mbxStatus);
|
||
if (phba->sli_rev <= LPFC_SLI_REV3) {
|
||
/* Clear all interrupt enable conditions */
|
||
writel(0, phba->HCregaddr);
|
||
readl(phba->HCregaddr); /* flush */
|
||
/* Clear all pending interrupts */
|
||
writel(0xffffffff, phba->HAregaddr);
|
||
readl(phba->HAregaddr); /* flush */
|
||
}
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
if (rc != MBX_BUSY || flag == MBX_POLL)
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
|
||
if (flag == MBX_POLL)
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
|
||
return 0;
|
||
}
|
||
|
||
/**
|
||
* lpfc_hba_down_link - this routine downs the FC link
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
|
||
*
|
||
* This routine will issue the DOWN_LINK mailbox command call.
|
||
* It is available to other drivers through the lpfc_hba data
|
||
* structure for use to stop the link.
|
||
*
|
||
* Return code
|
||
* 0 - success
|
||
* Any other value - error
|
||
**/
|
||
static int
|
||
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
|
||
{
|
||
LPFC_MBOXQ_t *pmb;
|
||
int rc;
|
||
|
||
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmb) {
|
||
phba->link_state = LPFC_HBA_ERROR;
|
||
return -ENOMEM;
|
||
}
|
||
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0491 Adapter Link is disabled.\n");
|
||
lpfc_down_link(phba, pmb);
|
||
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
|
||
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"2522 Adapter failed to issue DOWN_LINK"
|
||
" mbox command rc 0x%x\n", rc);
|
||
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
return -EIO;
|
||
}
|
||
if (flag == MBX_POLL)
|
||
mempool_free(pmb, phba->mbox_mem_pool);
|
||
|
||
return 0;
|
||
}
|
||
|
||
/**
|
||
* lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
|
||
* @phba: pointer to lpfc HBA data structure.
|
||
*
|
||
* This routine will do LPFC uninitialization before the HBA is reset when
|
||
* bringing down the SLI Layer.
|
||
*
|
||
* Return codes
|
||
* 0 - success.
|
||
* Any other value - error.
|
||
**/
|
||
int
|
||
lpfc_hba_down_prep(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_vport **vports;
|
||
int i;
|
||
|
||
if (phba->sli_rev <= LPFC_SLI_REV3) {
|
||
/* Disable interrupts */
|
||
writel(0, phba->HCregaddr);
|
||
readl(phba->HCregaddr); /* flush */
|
||
}
|
||
|
||
if (phba->pport->load_flag & FC_UNLOADING)
|
||
lpfc_cleanup_discovery_resources(phba->pport);
|
||
else {
|
||
vports = lpfc_create_vport_work_array(phba);
|
||
if (vports != NULL)
|
||
for (i = 0; i <= phba->max_vports &&
|
||
vports[i] != NULL; i++)
|
||
lpfc_cleanup_discovery_resources(vports[i]);
|
||
lpfc_destroy_vport_work_array(phba, vports);
|
||
}
|
||
return 0;
|
||
}
|
||
|
||
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	/* Clear the pending-event flag first, under hbalock */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	/* Drain the slow-path event list one entry at a time, taking
	 * hbalock only around the list removal.
	 */
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		/* Release the resource that backs this event, keyed off
		 * the CQE completion code.
		 */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			/* WQE completion: the event is embedded in an iocbq */
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			/* Unsolicited receive: return the HBQ buffer */
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
|
||
|
||
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		/* Detach the whole postbufq under hbalock, then free the
		 * buffers without holding the lock.
		 */
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		/* Re-take the lock to adjust the ring's buffer accounting */
		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
|
||
|
||
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer. For SLI-3 the rings are protected by hbalock; for
 * SLI-4 each WQ's pring has its own ring_lock.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* SLI-3 path: walk the fixed set of rings */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	/* SLI-4 path: walk every work queue's pseudo-ring */
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		/* Clear the on-txcmplq marker before splicing the IOCBs off */
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
|
||
|
||
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-3 flavor of the post-reset cleanup: release preposted ELS buffers,
 * then flush anything still sitting on the txcmplq rings.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	return 0;
}
|
||
|
||
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer: frees HBQ buffers, flushes the txcmplq, reposts
 * aborted ELS SGLs, returns aborted IO buffers to the put lists, reposts
 * aborted NVMET context buffers and drains deferred slow-path events.
 *
 * Return codes
 *   Number of aborted IO buffers moved back to the free (put) lists.
 *   (Note: despite the name, this is a count, not a 0/-errno status.)
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	/* NOTE(review): nvme_aborts appears unused in this routine */
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		/* Move this hdwq's aborted IO buffers to a local list */
		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		/* Scrub each buffer before returning it to the free pool */
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Repost aborted NVMET context buffers */
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
|
||
|
||
/**
|
||
* lpfc_hba_down_post - Wrapper func for hba down post routine
|
||
* @phba: pointer to lpfc HBA data structure.
|
||
*
|
||
* This routine wraps the actual SLI3 or SLI4 routine for performing
|
||
* uninitialization after the HBA is reset when bring down the SLI Layer.
|
||
*
|
||
* Return codes
|
||
* 0 - success.
|
||
* Any other value - error.
|
||
**/
|
||
int
|
||
lpfc_hba_down_post(struct lpfc_hba *phba)
|
||
{
|
||
return (*phba->lpfc_hba_down_post)(phba);
|
||
}
|
||
|
||
/**
|
||
* lpfc_hb_timeout - The HBA-timer timeout handler
|
||
* @t: timer context used to obtain the pointer to lpfc hba data structure.
|
||
*
|
||
* This is the HBA-timer timeout handler registered to the lpfc driver. When
|
||
* this timer fires, a HBA timeout event shall be posted to the lpfc driver
|
||
* work-port-events bitmap and the worker thread is notified. This timeout
|
||
* event will be used by the worker thread to invoke the actual timeout
|
||
* handler routine, lpfc_hb_timeout_handler. Any periodical operations will
|
||
* be performed in the timeout handler and the HBA timeout event bit shall
|
||
* be cleared by the worker thread after it has taken the event bitmap out.
|
||
**/
|
||
static void
|
||
lpfc_hb_timeout(struct timer_list *t)
|
||
{
|
||
struct lpfc_hba *phba;
|
||
uint32_t tmo_posted;
|
||
unsigned long iflag;
|
||
|
||
phba = from_timer(phba, t, hb_tmofunc);
|
||
|
||
/* Check for heart beat timeout conditions */
|
||
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
|
||
tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
|
||
if (!tmo_posted)
|
||
phba->pport->work_port_events |= WORKER_HB_TMO;
|
||
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
|
||
|
||
/* Tell the worker thread there is work to do */
|
||
if (!tmo_posted)
|
||
lpfc_worker_wake_up(phba);
|
||
return;
|
||
}
|
||
|
||
/**
|
||
* lpfc_rrq_timeout - The RRQ-timer timeout handler
|
||
* @t: timer context used to obtain the pointer to lpfc hba data structure.
|
||
*
|
||
* This is the RRQ-timer timeout handler registered to the lpfc driver. When
|
||
* this timer fires, a RRQ timeout event shall be posted to the lpfc driver
|
||
* work-port-events bitmap and the worker thread is notified. This timeout
|
||
* event will be used by the worker thread to invoke the actual timeout
|
||
* handler routine, lpfc_rrq_handler. Any periodical operations will
|
||
* be performed in the timeout handler and the RRQ timeout event bit shall
|
||
* be cleared by the worker thread after it has taken the event bitmap out.
|
||
**/
|
||
static void
|
||
lpfc_rrq_timeout(struct timer_list *t)
|
||
{
|
||
struct lpfc_hba *phba;
|
||
unsigned long iflag;
|
||
|
||
phba = from_timer(phba, t, rrq_tmr);
|
||
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
|
||
if (!(phba->pport->load_flag & FC_UNLOADING))
|
||
phba->hba_flag |= HBA_RRQ_ACTIVE;
|
||
else
|
||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
|
||
|
||
if (!(phba->pport->load_flag & FC_UNLOADING))
|
||
lpfc_worker_wake_up(phba);
|
||
}
|
||
|
||
/**
|
||
* lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @pmboxq: pointer to the driver internal queue element for mailbox command.
|
||
*
|
||
* This is the callback function to the lpfc heart-beat mailbox command.
|
||
* If configured, the lpfc driver issues the heart-beat mailbox command to
|
||
* the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
|
||
* heart-beat mailbox command is issued, the driver shall set up heart-beat
|
||
* timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
|
||
* heart-beat outstanding state. Once the mailbox command comes back and
|
||
* no error conditions detected, the heart-beat mailbox command timer is
|
||
* reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
|
||
* state is cleared for the next heart-beat. If the timer expired with the
|
||
* heart-beat outstanding state set, the driver will put the HBA offline.
|
||
**/
|
||
static void
|
||
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
|
||
{
|
||
unsigned long drvr_flag;
|
||
|
||
spin_lock_irqsave(&phba->hbalock, drvr_flag);
|
||
phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
|
||
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
|
||
|
||
/* Check and reset heart-beat timer if necessary */
|
||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
|
||
!(phba->link_state == LPFC_HBA_ERROR) &&
|
||
!(phba->pport->load_flag & FC_UNLOADING))
|
||
mod_timer(&phba->hb_tmofunc,
|
||
jiffies +
|
||
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
|
||
return;
|
||
}
|
||
|
||
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the embedded delayed work structure.
 *
 * This routine tracks per-cq idle_stat and determines polling decisions:
 * CQs whose primary CPU is mostly idle are serviced via queued work,
 * busy CPUs switch the CQ to IRQ polling. Reschedules itself every
 * LPFC_IDLE_STAT_DELAY ms unless the driver is unloading.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	/* Do not requeue when the driver is going away */
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	/* Skip the sampling pass (but keep the work rearmed) when the
	 * port is errored/offline or congestion management is active.
	 */
	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		/* First compute busy %, then invert to get idle % */
		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		/* Mostly busy CPU -> poll from IRQ; mostly idle -> queue work */
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		/* Remember counters for the next sampling period */
		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
|
||
|
||
/**
 * lpfc_hb_eq_delay_work - Periodic auto-tuning of EQ interrupt delay
 * @work: pointer to the embedded delayed work structure.
 *
 * When automatic interrupt coalescing (cfg_auto_imax) is enabled, this
 * work item periodically computes a per-EQ delay from the per-cpu
 * interrupt counts and applies it via lpfc_modify_hba_eq_delay. EQs that
 * migrated CPUs are moved to the proper per-cpu list. Requeues itself
 * every LPFC_EQ_DELAY_MSECS unless the driver is unloading or auto
 * tuning is disabled.
 **/
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	/* No requeue when auto tuning is off or the driver is unloading */
	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	/* Per-cpu flags: 1 = this cpu's EQs should get a delay applied */
	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		/* Mark the EQ's last cpu if it has a delay or was flagged */
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			/* Scale delay by interrupt count, clamped to max */
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		/* Restart the interrupt count for the next period */
		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			/* EQ migrated: move it to its new cpu's list */
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			/* Only touch the hardware when the delay changed */
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
|
||
|
||
/**
|
||
* lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* For each heartbeat, this routine does some heuristic methods to adjust
|
||
* XRI distribution. The goal is to fully utilize free XRIs.
|
||
**/
|
||
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
|
||
{
|
||
u32 i;
|
||
u32 hwq_count;
|
||
|
||
hwq_count = phba->cfg_hdw_queue;
|
||
for (i = 0; i < hwq_count; i++) {
|
||
/* Adjust XRIs in private pool */
|
||
lpfc_adjust_pvt_pool_count(phba, i);
|
||
|
||
/* Adjust high watermark */
|
||
lpfc_adjust_high_watermark(phba, i);
|
||
|
||
#ifdef LPFC_MXP_STAT
|
||
/* Snapshot pbl, pvt and busy count */
|
||
lpfc_snapshot_mxp(phba, i);
|
||
#endif
|
||
}
|
||
}
|
||
|
||
/**
|
||
* lpfc_issue_hb_mbox - Issues heart-beat mailbox command
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* If a HB mbox is not already in progrees, this routine will allocate
|
||
* a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
|
||
* and issue it. The HBA_HBEAT_INP flag means the command is in progress.
|
||
**/
|
||
int
|
||
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
|
||
{
|
||
LPFC_MBOXQ_t *pmboxq;
|
||
int retval;
|
||
|
||
/* Is a Heartbeat mbox already in progress */
|
||
if (phba->hba_flag & HBA_HBEAT_INP)
|
||
return 0;
|
||
|
||
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||
if (!pmboxq)
|
||
return -ENOMEM;
|
||
|
||
lpfc_heart_beat(phba, pmboxq);
|
||
pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
|
||
pmboxq->vport = phba->pport;
|
||
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
|
||
|
||
if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
|
||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||
return -ENXIO;
|
||
}
|
||
phba->hba_flag |= HBA_HBEAT_INP;
|
||
|
||
return 0;
|
||
}
|
||
|
||
/**
|
||
* lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
|
||
* flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
|
||
* of the value of lpfc_enable_hba_heartbeat.
|
||
* If lpfc_enable_hba_heartbeat is set, the timeout routine will always
|
||
* try to issue a MBX_HEARTBEAT mbox command.
|
||
**/
|
||
void
|
||
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
|
||
{
|
||
if (phba->cfg_enable_hba_heartbeat)
|
||
return;
|
||
phba->hba_flag |= HBA_HBEAT_TMO;
|
||
}
|
||
|
||
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	/* Per-vport periodic checks: sequence edtov and FDMI changes */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Do not re-arm the heartbeat while errored/unloading/offline */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	/* Free cached ELS buffers if the count did not move since the
	 * previous heartbeat (i.e. they have gone unused for a period).
	 */
	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			/* Pick the re-arm interval based on whether a
			 * heartbeat is still in flight.
			 */
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still outstanding: "
					"last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			/* Only issue a new heartbeat when the mailbox
			 * subsystem is idle.
			 */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						" updated in %d ms\n",
						jiffies_to_msecs(jiffies
							 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
|
||
|
||
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected. The sequence is: mark SLI
 * inactive, prep and take the port offline, reset the board, run the
 * post-reset cleanup, wait for mailbox readiness and leave the port in
 * LPFC_HBA_ERROR state with management I/O unblocked.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	/* Quiesce outstanding DMA before resetting the board */
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);

	phba->link_state = LPFC_HBA_ERROR;
	return;
}
|
||
|
||
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected. If the port is already in
 * LPFC_HBA_ERROR with a PCI error pending, the routine returns without
 * doing anything (the PCI error path owns recovery).
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	/* Bail out if a PCI error handler is already dealing with this */
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
|
||
|
||
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		/* Register read failure means the adapter was unplugged */
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest work status words from SLIM */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
|
||
|
||
static void
|
||
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_board_event_header board_event;
|
||
struct Scsi_Host *shost;
|
||
|
||
board_event.event_type = FC_REG_BOARD_EVENT;
|
||
board_event.subcategory = LPFC_EVENT_PORTINTERR;
|
||
shost = lpfc_shost_from_vport(phba->pport);
|
||
fc_host_post_vendor_event(shost, fc_get_event_number(),
|
||
sizeof(board_event),
|
||
(char *) &board_event,
|
||
LPFC_NL_VENDOR_ID);
|
||
}
|
||
|
||
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Critical temperature: report, latch over-temp state and
		 * take the port offline.
		 */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
|
||
|
||
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	/* IF_TYPE_2 and later report readiness through the port status
	 * register; wait for it before resetting the function.
	 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			/* Fail the in-flight mailbox command and run its
			 * completion so the issuer is not left waiting.
			 */
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	/* Take the port offline, flush outstanding I/O, then restart the
	 * board; interrupts are released across the restart because the
	 * port's resource configuration may change.
	 */
	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.  Behavior depends on the SLI interface type: IF_TYPE_0 ports
 * are checked for a recoverable unrecoverable-error (UE) state via the port
 * semaphore register; IF_TYPE_2/6 ports are diagnosed from the port status
 * and error registers (overtemp, firmware restart, forced dump, provisioning)
 * and a function reset is attempted.  On unrecoverable paths a dump event is
 * reported to the management application and the FC transport.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli_flush_io_rings(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		/* Poll the port semaphore once per second for up to
		 * ue_to_sr milliseconds, looking for the recoverable-UE
		 * indication.
		 */
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			     LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			/* Give firmware up to 20 seconds to reach the
			 * PORT_READY post stage before resetting.
			 */
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* Critical over-temperature: notify the transport,
			 * latch the over-temp state, and take the port down.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			/* Expected reset during firmware update - no
			 * recovery message needed.
			 */
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;	/* recovered: still report dump event */
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	/* Common exit: report a dump event to management and the transport. */
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
|
||
* lpfc_handle_eratt - Wrapper func for handling hba error attention
|
||
* @phba: pointer to lpfc HBA data structure.
|
||
*
|
||
* This routine wraps the actual SLI3 or SLI4 hba error attention handling
|
||
* routine from the API jump table function pointer from the lpfc_hba struct.
|
||
*
|
||
* Return codes
|
||
* 0 - success.
|
||
* Any other value - error.
|
||
**/
|
||
void
|
||
lpfc_handle_eratt(struct lpfc_hba *phba)
|
||
{
|
||
(*phba->lpfc_handle_eratt)(phba);
|
||
}
|
||
|
||
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 *
 * On success it issues a READ_TOPOLOGY mailbox command (completion handled
 * by lpfc_mbx_cmpl_read_topology) and clears the link attention bit.  On
 * any failure it re-enables link attention interrupts, forces link down,
 * and marks the HBA in error; rc (1..4) in the log identifies which step
 * failed.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		rc = 2;
		mempool_free(pmb, phba->mbox_mem_pool);
		goto lpfc_handle_latt_err_exit;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);
	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	/* Undo the ELS block and release the mailbox resources. */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
static void
|
||
lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
|
||
{
|
||
int i, j;
|
||
|
||
while (length > 0) {
|
||
/* Look for Serial Number */
|
||
if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
j = 0;
|
||
length -= (3+i);
|
||
while (i--) {
|
||
phba->SerialNumber[j++] = vpd[(*pindex)++];
|
||
if (j == 31)
|
||
break;
|
||
}
|
||
phba->SerialNumber[j] = 0;
|
||
continue;
|
||
} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
|
||
phba->vpd_flag |= VPD_MODEL_DESC;
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
j = 0;
|
||
length -= (3+i);
|
||
while (i--) {
|
||
phba->ModelDesc[j++] = vpd[(*pindex)++];
|
||
if (j == 255)
|
||
break;
|
||
}
|
||
phba->ModelDesc[j] = 0;
|
||
continue;
|
||
} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
|
||
phba->vpd_flag |= VPD_MODEL_NAME;
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
j = 0;
|
||
length -= (3+i);
|
||
while (i--) {
|
||
phba->ModelName[j++] = vpd[(*pindex)++];
|
||
if (j == 79)
|
||
break;
|
||
}
|
||
phba->ModelName[j] = 0;
|
||
continue;
|
||
} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
|
||
phba->vpd_flag |= VPD_PROGRAM_TYPE;
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
j = 0;
|
||
length -= (3+i);
|
||
while (i--) {
|
||
phba->ProgramType[j++] = vpd[(*pindex)++];
|
||
if (j == 255)
|
||
break;
|
||
}
|
||
phba->ProgramType[j] = 0;
|
||
continue;
|
||
} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
|
||
phba->vpd_flag |= VPD_PORT;
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
j = 0;
|
||
length -= (3 + i);
|
||
while (i--) {
|
||
if ((phba->sli_rev == LPFC_SLI_REV4) &&
|
||
(phba->sli4_hba.pport_name_sta ==
|
||
LPFC_SLI4_PPNAME_GET)) {
|
||
j++;
|
||
(*pindex)++;
|
||
} else
|
||
phba->Port[j++] = vpd[(*pindex)++];
|
||
if (j == 19)
|
||
break;
|
||
}
|
||
if ((phba->sli_rev != LPFC_SLI_REV4) ||
|
||
(phba->sli4_hba.pport_name_sta ==
|
||
LPFC_SLI4_PPNAME_NON))
|
||
phba->Port[j] = 0;
|
||
continue;
|
||
} else {
|
||
*pindex += 2;
|
||
i = vpd[*pindex];
|
||
*pindex += 1;
|
||
*pindex += i;
|
||
length -= (3 + i);
|
||
}
|
||
}
|
||
}
|
||
|
||
/**
|
||
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @vpd: pointer to the vital product data.
|
||
* @len: length of the vital product data in bytes.
|
||
*
|
||
* This routine parses the Vital Product Data (VPD). The VPD is treated as
|
||
* an array of characters. In this routine, the ModelName, ProgramType, and
|
||
* ModelDesc, etc. fields of the phba data structure will be populated.
|
||
*
|
||
* Return codes
|
||
* 0 - pointer to the VPD passed in is NULL
|
||
* 1 - success
|
||
**/
|
||
int
|
||
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
|
||
{
|
||
uint8_t lenlo, lenhi;
|
||
int Length;
|
||
int i;
|
||
int finished = 0;
|
||
int index = 0;
|
||
|
||
if (!vpd)
|
||
return 0;
|
||
|
||
/* Vital Product */
|
||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||
"0455 Vital Product Data: x%x x%x x%x x%x\n",
|
||
(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
|
||
(uint32_t) vpd[3]);
|
||
while (!finished && (index < (len - 4))) {
|
||
switch (vpd[index]) {
|
||
case 0x82:
|
||
case 0x91:
|
||
index += 1;
|
||
lenlo = vpd[index];
|
||
index += 1;
|
||
lenhi = vpd[index];
|
||
index += 1;
|
||
i = ((((unsigned short)lenhi) << 8) + lenlo);
|
||
index += i;
|
||
break;
|
||
case 0x90:
|
||
index += 1;
|
||
lenlo = vpd[index];
|
||
index += 1;
|
||
lenhi = vpd[index];
|
||
index += 1;
|
||
Length = ((((unsigned short)lenhi) << 8) + lenlo);
|
||
if (Length > len - index)
|
||
Length = len - index;
|
||
|
||
lpfc_fill_vpd(phba, vpd, Length, &index);
|
||
finished = 0;
|
||
break;
|
||
case 0x78:
|
||
finished = 1;
|
||
break;
|
||
default:
|
||
index ++;
|
||
break;
|
||
}
|
||
}
|
||
|
||
return(1);
|
||
}
|
||
|
||
/**
|
||
* lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @mdp: pointer to the data structure to hold the derived model name.
|
||
* @descp: pointer to the data structure to hold the derived description.
|
||
*
|
||
* This routine retrieves HBA's description based on its registered PCI device
|
||
* ID. The @descp passed into this function points to an array of 256 chars. It
|
||
* shall be returned with the model name, maximum speed, and the host bus type.
|
||
* The @mdp passed into this function points to an array of 80 chars. When the
|
||
* function returns, the @mdp will be filled with the model name.
|
||
**/
|
||
static void
|
||
lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
||
{
|
||
uint16_t sub_dev_id = phba->pcidev->subsystem_device;
|
||
char *model = "<Unknown>";
|
||
int tbolt = 0;
|
||
|
||
switch (sub_dev_id) {
|
||
case PCI_DEVICE_ID_CLRY_161E:
|
||
model = "161E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_162E:
|
||
model = "162E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_164E:
|
||
model = "164E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_161P:
|
||
model = "161P";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_162P:
|
||
model = "162P";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_164P:
|
||
model = "164P";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_321E:
|
||
model = "321E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_322E:
|
||
model = "322E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_324E:
|
||
model = "324E";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_321P:
|
||
model = "321P";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_322P:
|
||
model = "322P";
|
||
break;
|
||
case PCI_DEVICE_ID_CLRY_324P:
|
||
model = "324P";
|
||
break;
|
||
case PCI_DEVICE_ID_TLFC_2XX2:
|
||
model = "2XX2";
|
||
tbolt = 1;
|
||
break;
|
||
case PCI_DEVICE_ID_TLFC_3162:
|
||
model = "3162";
|
||
tbolt = 1;
|
||
break;
|
||
case PCI_DEVICE_ID_TLFC_3322:
|
||
model = "3322";
|
||
tbolt = 1;
|
||
break;
|
||
default:
|
||
model = "Unknown";
|
||
break;
|
||
}
|
||
|
||
if (mdp && mdp[0] == '\0')
|
||
snprintf(mdp, 79, "%s", model);
|
||
|
||
if (descp && descp[0] == '\0')
|
||
snprintf(descp, 255,
|
||
"ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
|
||
(tbolt) ? "ThunderLink FC " : "Celerity FC-",
|
||
model,
|
||
phba->Port);
|
||
}
|
||
|
||
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Both output buffers already populated - nothing to do. */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* ATTO-branded adapters use their own naming scheme. */
	if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
		lpfc_get_atto_model_desc(phba, mdp, descp);
		return;
	}

	/* Derive the maximum link speed (in Gb) from the link-mode bits. */
	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	/* Map the PCI device ID to marketing name, bus type, and function. */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		/* FCoE part: speed is reported in GE, not Gb. */
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7P_FC:
		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
/**
 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.  Any
 * buffers that could not be posted (allocation or issue failure) are
 * remembered in pring->missbufcnt and retried on the next call.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers that failed to post on a previous attempt. */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				/* Second buffer failed: unwind the first. */
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Fill in the buffer descriptor(s) for this IOCB. */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Issue failed: free both buffers and re-count them
			 * so they are retried later.
			 */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Track the posted buffers for later retrieval. */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
/**
|
||
* lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine posts initial receive IOCB buffers to the ELS ring. The
|
||
* current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
|
||
* set to 64 IOCBs. SLI3 only.
|
||
*
|
||
* Return codes
|
||
* 0 - success (currently always success)
|
||
**/
|
||
static int
|
||
lpfc_post_rcv_buf(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_sli *psli = &phba->sli;
|
||
|
||
/* Ring 0, ELS / CT buffers */
|
||
lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
|
||
/* Ring 2 - FCP no buffers needed */
|
||
|
||
return 0;
|
||
}
|
||
|
||
/* S(N, V): rotate the 32-bit value V left by N bits (helper for the SHA-1
 * routines below; N is always 1..31 here, so the (32-N) shift is safe).
 */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * Loads the five standard SHA-1 initialization constants into the hash
 * state array used by the LC HBA authentication code.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	static const uint32_t sha1_iv[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_iv[i];
}
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * Runs one SHA-1 compression over the 16-word message block at the start of
 * @HashWorkingPointer: the 80-word message schedule is expanded in place,
 * the 80 rounds are applied, and the resulting chaining values are added
 * back into @HashResultPointer.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	uint32_t a, b, c, d, e, tmp;
	int t;

	/* Expand the message schedule: words 16..79. */
	for (t = 16; t <= 79; t++)
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);

	a = HashResultPointer[0];
	b = HashResultPointer[1];
	c = HashResultPointer[2];
	d = HashResultPointer[3];
	e = HashResultPointer[4];

	/* 80 rounds, with the round function and constant changing every
	 * 20 rounds per the SHA-1 specification.
	 */
	for (t = 0; t <= 79; t++) {
		if (t < 20)
			tmp = ((b & c) | ((~b) & d)) + 0x5A827999;
		else if (t < 40)
			tmp = (b ^ c ^ d) + 0x6ED9EBA1;
		else if (t < 60)
			tmp = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
		else
			tmp = (b ^ c ^ d) + 0xCA62C1D6;
		tmp += S(5, a) + e + HashWorkingPointer[t];
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}

	HashResultPointer[0] += a;
	HashResultPointer[1] += b;
	HashResultPointer[2] += c;
	HashResultPointer[3] += d;
	HashResultPointer[4] += e;

}
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * XORs one host challenge random number into the corresponding working
 * hash entry, in place, and returns the result through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
|
||
|
||
/**
|
||
* lpfc_hba_init - Perform special handling for LC HBA initialization
|
||
* @phba: pointer to lpfc hba data structure.
|
||
* @hbainit: pointer to an array of unsigned 32-bit integers.
|
||
*
|
||
* This routine performs the special handling for LC HBA initialization.
|
||
**/
|
||
void
|
||
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
|
||
{
|
||
int t;
|
||
uint32_t *HashWorking;
|
||
uint32_t *pwwnn = (uint32_t *) phba->wwnn;
|
||
|
||
HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
|
||
if (!HashWorking)
|
||
return;
|
||
|
||
HashWorking[0] = HashWorking[78] = *pwwnn++;
|
||
HashWorking[1] = HashWorking[79] = *pwwnn;
|
||
|
||
for (t = 0; t < 7; t++)
|
||
lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
|
||
|
||
lpfc_sha_init(hbainit);
|
||
lpfc_sha_iterate(hbainit, HashWorking);
|
||
kfree(HashWorking);
|
||
}
|
||
|
||
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;	/* poll counter for the node-list drain loop below */

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	/* Clean up VMID resources */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_vmid_vport_cleanup(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			/* Unused fabric-controller node: just drop our ref */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric Ports not in UNMAPPED state are cleaned up in the
		 * DEVICE_RM event.
		 */
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		/* Nodes no longer registered with either transport can be
		 * removed outright via the state machine.
		 */
		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
	}

	/* This is a special case flush to return all
	 * IOs before entering this loop. There are
	 * two points in the code where a flush is
	 * avoided if the FC_UNLOADING flag is set.
	 * one is in the multipool destroy,
	 * (this prevents a crash) and the other is
	 * in the nvme abort handler, ( also prevents
	 * a crash). Both of these exceptions are
	 * cases where the slot is still accessible.
	 * The flush here is only when the pci slot
	 * is offline.
	 */
	if (vport->load_flag & FC_UNLOADING &&
	    pci_channel_offline(phba->pcidev))
		lpfc_sli_flush_io_rings(vport->phba);

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		/* ~30 seconds of 10ms polls before giving up and logging
		 * whatever nodes are still on the list.
		 */
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_DISCOVERY,
						 "0282 did:x%x ndlp:x%px "
						 "refcnt:%d xflags x%x nflag x%x\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 kref_read(&ndlp->kref),
						 ndlp->fc4_xpt_flags,
						 ndlp->nlp_flag);
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
|
||
|
||
/**
|
||
* lpfc_stop_vport_timers - Stop all the timers associated with a vport
|
||
* @vport: pointer to a virtual N_Port data structure.
|
||
*
|
||
* This routine stops all the timers associated with a @vport. This function
|
||
* is invoked before disabling or deleting a @vport. Note that the physical
|
||
* port is treated as @vport 0.
|
||
**/
|
||
void
|
||
lpfc_stop_vport_timers(struct lpfc_vport *vport)
|
||
{
|
||
del_timer_sync(&vport->els_tmofunc);
|
||
del_timer_sync(&vport->delayed_disc_tmo);
|
||
lpfc_can_disctmo(vport);
|
||
return;
|
||
}
|
||
|
||
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
|
||
|
||
/**
|
||
* lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine stops the SLI4 FCF rediscover wait timer if it's on. It
|
||
* checks whether the FCF rediscovery wait timer is pending with the host
|
||
* lock held before proceeding with disabling the timer and clearing the
|
||
* wait timer pendig flag.
|
||
**/
|
||
void
|
||
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
|
||
{
|
||
spin_lock_irq(&phba->hbalock);
|
||
if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
|
||
/* FCF rediscovery timer already fired or stopped */
|
||
spin_unlock_irq(&phba->hbalock);
|
||
return;
|
||
}
|
||
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
|
||
/* Clear failover in progress flags */
|
||
phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
|
||
spin_unlock_irq(&phba->hbalock);
|
||
}
|
||
|
||
/**
|
||
* lpfc_cmf_stop - Stop CMF processing
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This is called when the link goes down or if CMF mode is turned OFF.
|
||
* It is also called when going offline or unloaded just before the
|
||
* congestion info buffer is unregistered.
|
||
**/
|
||
void
|
||
lpfc_cmf_stop(struct lpfc_hba *phba)
|
||
{
|
||
int cpu;
|
||
struct lpfc_cgn_stat *cgs;
|
||
|
||
/* We only do something if CMF is enabled */
|
||
if (!phba->sli4_hba.pc_sli4_params.cmf)
|
||
return;
|
||
|
||
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
|
||
"6221 Stop CMF / Cancel Timer\n");
|
||
|
||
/* Cancel the CMF timer */
|
||
hrtimer_cancel(&phba->cmf_timer);
|
||
|
||
/* Zero CMF counters */
|
||
atomic_set(&phba->cmf_busy, 0);
|
||
for_each_present_cpu(cpu) {
|
||
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
|
||
atomic64_set(&cgs->total_bytes, 0);
|
||
atomic64_set(&cgs->rcv_bytes, 0);
|
||
atomic_set(&cgs->rx_io_cnt, 0);
|
||
atomic64_set(&cgs->rx_latency, 0);
|
||
}
|
||
atomic_set(&phba->cmf_bw_wait, 0);
|
||
|
||
/* Resume any blocked IO - Queue unblock on workqueue */
|
||
queue_work(phba->wq, &phba->unblock_request_work);
|
||
}
|
||
|
||
/**
 * lpfc_get_max_line_rate - Derive the per-interval line-rate byte budget
 * @phba: pointer to lpfc hba data structure.
 *
 * Converts the current SLI port speed (as returned by
 * lpfc_sli_port_speed_get()) into the byte-rate figure used by the CMF
 * code: speed * 1024 * 1024 / 10.
 *
 * The math is kept entirely in 64 bits via div_u64(): the original cast
 * through (unsigned long) truncated the product on 32-bit kernels, where
 * speed * 1024 * 1024 easily exceeds 32 bits.
 */
static inline uint64_t
lpfc_get_max_line_rate(struct lpfc_hba *phba)
{
	uint64_t rate = lpfc_sli_port_speed_get(phba);

	return div_u64(rate * 1024 * 1024, 10);
}
|
||
|
||
/**
 * lpfc_cmf_signal_init - Resync CMF bandwidth settings with the link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the CMF interval byte budget from the current maximum line
 * rate and issues a CMF_SYNC WQE so the firmware synchronizes its CMF
 * bandwidth with the link speed.
 **/
void
lpfc_cmf_signal_init(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6223 Signal CMF init\n");

	/* Use the new fc_linkspeed to recalculate */
	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
	/* Bytes the link can carry in one CMF interval (interval is in ms) */
	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
					    phba->cmf_interval_rate, 1000);
	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;

	/* This is a signal to firmware to sync up CMF BW with link speed */
	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
}
|
||
|
||
/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link comes up or when CMF mode is switched
 * from OFF to Monitor or Managed.
 **/
void
lpfc_cmf_start(struct lpfc_hba *phba)
{
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf ||
	    phba->cmf_active_mode == LPFC_CFG_OFF)
		return;

	/* Reinitialize congestion buffer info */
	lpfc_init_congestion_buf(phba);

	/* Zero the fabric and sync congestion event counters */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	/* Zero the per-CPU CMF byte/latency statistics */
	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	phba->cmf_latency.tv_sec = 0;
	phba->cmf_latency.tv_nsec = 0;

	/* Tell the firmware to resync CMF bandwidth with the link speed */
	lpfc_cmf_signal_init(phba);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6222 Start CMF / Timer\n");

	/* Arm the periodic CMF timer (LPFC_CMF_INTERVAL is in ms) */
	phba->cmf_timer_cnt = 0;
	hrtimer_start(&phba->cmf_timer,
		      ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
		      HRTIMER_MODE_REL);
	/* Setup for latency check in IO cmpl routines */
	ktime_get_real_ts64(&phba->cmf_latency);

	atomic_set(&phba->cmf_bw_wait, 0);
	atomic_set(&phba->cmf_stop_io, 0);
}
|
||
|
||
/**
|
||
* lpfc_stop_hba_timers - Stop all the timers associated with an HBA
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine stops all the timers associated with a HBA. This function is
|
||
* invoked before either putting a HBA offline or unloading the driver.
|
||
**/
|
||
void
|
||
lpfc_stop_hba_timers(struct lpfc_hba *phba)
|
||
{
|
||
if (phba->pport)
|
||
lpfc_stop_vport_timers(phba->pport);
|
||
cancel_delayed_work_sync(&phba->eq_delay_work);
|
||
cancel_delayed_work_sync(&phba->idle_stat_delay_work);
|
||
del_timer_sync(&phba->sli.mbox_tmo);
|
||
del_timer_sync(&phba->fabric_block_timer);
|
||
del_timer_sync(&phba->eratt_poll);
|
||
del_timer_sync(&phba->hb_tmofunc);
|
||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||
del_timer_sync(&phba->rrq_tmr);
|
||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||
}
|
||
phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
|
||
|
||
switch (phba->pci_dev_grp) {
|
||
case LPFC_PCI_DEV_LP:
|
||
/* Stop any LightPulse device specific driver timers */
|
||
del_timer_sync(&phba->fcp_poll_timer);
|
||
break;
|
||
case LPFC_PCI_DEV_OC:
|
||
/* Stop any OneConnect device specific driver timers */
|
||
lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
|
||
break;
|
||
default:
|
||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||
"0297 Invalid device group (x%x)\n",
|
||
phba->pci_dev_grp);
|
||
break;
|
||
}
|
||
return;
|
||
}
|
||
|
||
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;	/* default if no mailbox is active */
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* With LPFC_MBX_NO_WAIT, do not wait for an active mailbox command */
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timed out: log which command was still active and
			 * give up waiting; the interface stays blocked.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
|
||
|
||
/**
|
||
* lpfc_sli4_node_prep - Assign RPIs for active nodes.
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* Allocate RPIs for all active remote nodes. This is needed whenever
|
||
* an SLI4 adapter is reset and the driver is not unloading. Its purpose
|
||
* is to fixup the temporary rpi assignments.
|
||
**/
|
||
void
|
||
lpfc_sli4_node_prep(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_nodelist *ndlp, *next_ndlp;
|
||
struct lpfc_vport **vports;
|
||
int i, rpi;
|
||
|
||
if (phba->sli_rev != LPFC_SLI_REV4)
|
||
return;
|
||
|
||
vports = lpfc_create_vport_work_array(phba);
|
||
if (vports == NULL)
|
||
return;
|
||
|
||
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
|
||
if (vports[i]->load_flag & FC_UNLOADING)
|
||
continue;
|
||
|
||
list_for_each_entry_safe(ndlp, next_ndlp,
|
||
&vports[i]->fc_nodes,
|
||
nlp_listp) {
|
||
rpi = lpfc_sli4_alloc_rpi(phba);
|
||
if (rpi == LPFC_RPI_ALLOC_ERROR) {
|
||
/* TODO print log? */
|
||
continue;
|
||
}
|
||
ndlp->nlp_rpi = rpi;
|
||
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
|
||
LOG_NODE | LOG_DISCOVERY,
|
||
"0009 Assign RPI x%x to ndlp x%px "
|
||
"DID:x%06x flg:x%x\n",
|
||
ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
|
||
ndlp->nlp_flag);
|
||
}
|
||
}
|
||
lpfc_destroy_vport_work_array(phba, vports);
|
||
}
|
||
|
||
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs (up to XRI_BATCH) from
 * lpfc_io_buf_list_put of HWQ 0 to the expedite pool and marks each moved
 * buffer as expedite.
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];	/* expedite pool is fed from HWQ 0 */

	spin_lock_init(&epd_pool->lock);
	/* Lock order: put-list lock first, then the pool lock */
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		/* Cap the expedite pool at one XRI batch */
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
|
||
|
||
/**
|
||
* lpfc_destroy_expedite_pool - destroy expedite pool
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
|
||
* of HWQ 0. Clear the mark.
|
||
**/
|
||
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_sli4_hdw_queue *qp;
|
||
struct lpfc_io_buf *lpfc_ncmd;
|
||
struct lpfc_io_buf *lpfc_ncmd_next;
|
||
struct lpfc_epd_pool *epd_pool;
|
||
unsigned long iflag;
|
||
|
||
epd_pool = &phba->epd_pool;
|
||
qp = &phba->sli4_hba.hdwq[0];
|
||
|
||
spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
|
||
spin_lock(&epd_pool->lock);
|
||
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
|
||
&epd_pool->list, list) {
|
||
list_move_tail(&lpfc_ncmd->list,
|
||
&qp->lpfc_io_buf_list_put);
|
||
lpfc_ncmd->flags = false;
|
||
qp->put_io_bufs++;
|
||
epd_pool->count--;
|
||
}
|
||
spin_unlock(&epd_pool->lock);
|
||
spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
|
||
}
|
||
|
||
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private pools per HWQ, then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
 * watermarks are also initialized. On allocation failure, any pools
 * created so far are torn down and XRI rebalancing is disabled.
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	/* Split the XRIs evenly across the hardware queues */
	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			/* Unwind: drop the expedite pool and every pool
			 * allocated on earlier iterations, then disable
			 * XRI rebalancing entirely.
			 */
			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		/* Lock order: put-list lock first, then the pool lock */
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
|
||
|
||
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	/* Flush in-flight IO first, but only when not unloading — see the
	 * FC_UNLOADING special-case notes in lpfc_cleanup().
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		/* Detach the pool from the HWQ before draining it */
		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
|
||
|
||
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	/* Already online: nothing to do */
	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		/* Remember whether firmware reset cleared all VPIs so
		 * vport VPIs can be reassigned below.
		 */
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	/* Clear offline mode on every vport and flag VPI re-registration */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
|
||
|
||
/**
|
||
* lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine marks a HBA's management interface as not blocked. Once the
|
||
* HBA's management interface is marked as not blocked, all the user space
|
||
* access to the HBA, whether they are from sysfs interface or libdfc
|
||
* interface will be allowed. The HBA is set to block the management interface
|
||
* when the driver prepares the HBA interface for online or offline and then
|
||
* set to unblock the management interface afterwards.
|
||
**/
|
||
void
|
||
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
|
||
{
|
||
unsigned long iflag;
|
||
|
||
spin_lock_irqsave(&phba->hbalock, iflag);
|
||
phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
|
||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||
}
|
||
|
||
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;
	int offline;
	bool hba_pci_err;

	/* Nothing to do when the port is already offline */
	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Snapshot the PCI channel / error state once; it selects the
	 * RPI-release path used for every node below.
	 */
	offline = pci_channel_offline(phba->pcidev);
	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			/* NOTE(review): shost is reassigned to the same value
			 * here; this second assignment looks redundant.
			 */
			shost = lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {

				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);

				/* When the slot is dead, the mailbox path is
				 * unusable: drop RPI state locally instead of
				 * issuing unreg_rpi to the adapter.
				 */
				if (offline || hba_pci_err) {
					spin_lock_irq(&ndlp->lock);
					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
						NLP_RPI_REGISTERED);
					spin_unlock_irq(&ndlp->lock);
					if (phba->sli_rev == LPFC_SLI_REV4)
						lpfc_sli_rpi_release(vports[i],
								     ndlp);
				} else {
					lpfc_unreg_rpi(vports[i], ndlp);
				}
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(vports[i], KERN_INFO,
						 LOG_NODE | LOG_DISCOVERY,
						 "0011 Free RPI x%x on "
						 "ndlp: x%px did x%x\n",
						 ndlp->nlp_rpi, ndlp,
						 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}

				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Don't remove the node unless the node
					 * has been unregistered with the
					 * transport, and we're not in recovery
					 * before dev_loss_tmo triggered.
					 * Otherwise, let dev_loss take care of
					 * the node.
					 */
					if (!(ndlp->save_flags &
					      NLP_IN_RECOV_POST_DEV_LOSS) &&
					    !(ndlp->fc4_xpt_flags &
					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
						lpfc_disc_state_machine
							(vports[i], ndlp,
							 NULL,
							 NLP_EVT_DEVICE_RM);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	/* Drain any deferred driver work before going offline */
	if (phba->wq)
		flush_workqueue(phba->wq);
}
|
||
|
||
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	/* Already offline: nothing to do */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations.  The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	/* Mark every vport offline and cancel its pending port events */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
	/* NOTE(review): FC_OFFLINE_MODE was just set on all vports by the loop
	 * above, so this check is false only if the vport work array could not
	 * be created — confirm that is the intended "unloading" condition.
	 */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
|
||
|
||
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	/* Only relevant when FCP is enabled */
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	/* Lock order: hbalock (irq) first, then the per-list locks */
	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
|
||
|
||
/**
|
||
* lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
|
||
* @phba: pointer to lpfc hba data structure.
|
||
*
|
||
* This routine is to free all the IO buffers and IOCBs from the driver
|
||
* list back to kernel. It is called from lpfc_pci_remove_one to free
|
||
* the internal resources before the device is removed from the system.
|
||
**/
|
||
void
|
||
lpfc_io_free(struct lpfc_hba *phba)
|
||
{
|
||
struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
|
||
struct lpfc_sli4_hdw_queue *qp;
|
||
int idx;
|
||
|
||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
|
||
qp = &phba->sli4_hba.hdwq[idx];
|
||
/* Release all the lpfc_nvme_bufs maintained by this host. */
|
||
spin_lock(&qp->io_buf_list_put_lock);
|
||
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
|
||
&qp->lpfc_io_buf_list_put,
|
||
list) {
|
||
list_del(&lpfc_ncmd->list);
|
||
qp->put_io_bufs--;
|
||
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
|
||
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
|
||
if (phba->cfg_xpsgl && !phba->nvmet_support)
|
||
lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
|
||
lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
|
||
kfree(lpfc_ncmd);
|
||
qp->total_io_bufs--;
|
||
}
|
||
spin_unlock(&qp->io_buf_list_put_lock);
|
||
|
||
spin_lock(&qp->io_buf_list_get_lock);
|
||
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
|
||
&qp->lpfc_io_buf_list_get,
|
||
list) {
|
||
list_del(&lpfc_ncmd->list);
|
||
qp->get_io_bufs--;
|
||
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
|
||