mk_tid_release(skb, chan, p - adap->tids.tid_tab);
t4_ofld_send(adap, skb);
spin_lock_bh(&adap->tid_release_lock);
}
adap->tid_release_task_busy = false;
spin_unlock_bh(&adap->tid_release_lock);
}
/*
* Release a TID and inform HW. If we are unable to allocate the release
* message we defer to a work queue.
*/
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
void *old;
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
old = t->tid_tab[tid];
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
t->tid_tab[tid] = NULL;
mk_tid_release(skb, chan, tid);
t4_ofld_send(adap, skb);
} else
cxgb4_queue_tid_release(t, chan, tid);
if (old)
atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
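/*
 * Usage sketch (hypothetical ULD teardown path; "lldi" and "csk" are
 * illustrative names, not from this file). A ULD that stashed per-connection
 * state under a TID releases it with:
 *
 *	cxgb4_remove_tid(lldi->tids, csk->tx_chan, csk->tid);
 *
 * The CPL_TID_RELEASE goes out immediately when an skb can be allocated,
 * otherwise the release is deferred to the tid-release work queue.
 */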
/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
static int tid_init(struct tid_info *t)
{
size_t size;
unsigned int natids = t->natids;
size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
BITS_TO_LONGS(t->nstids) * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
t->stids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
while (--natids)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids);
return 0;
}
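/*
 * For reference, the single t4_alloc_mem() block above is carved into four
 * consecutive regions by the pointer arithmetic:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap
 *
 * so freeing t->tid_tab releases all of them at once.
 */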
/**
* cxgb4_create_server - create an IP server
* @dev: the device
* @stid: the server TID
* @sip: local IP address to bind server to
* @sport: the server's TCP port
* @queue: queue to direct messages from this server to
*
* Create an IP server for the given port and address.
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
struct adapter *adap;
struct cpl_pass_open_req *req;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
adap = netdev2adap(dev);
req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
req->local_port = sport;
req->peer_port = htons(0);
req->local_ip = sip;
req->peer_ip = htonl(0);
chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);
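/*
 * Listener setup sketch (hypothetical caller; "sk", "netdev" and "queue" are
 * illustrative). A ULD holding server TID "stid" would create an IPv4
 * listener with:
 *
 *	err = cxgb4_create_server(netdev, stid, inet_sk(sk)->inet_rcv_saddr,
 *				  inet_sk(sk)->inet_sport, queue);
 *
 * where "queue" picks the ingress queue that will receive the resulting
 * CPL_PASS_ACCEPT_REQ messages; cxgb4_create_server6() below is the IPv6
 * counterpart.
 */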
/**
* cxgb4_create_server6 - create an IPv6 server
* @dev: the device
* @stid: the server TID
* @sip: local IPv6 address to bind server to
* @sport: the server's TCP port
* @queue: queue to direct messages from this server to
*
* Create an IPv6 server for the given port and address.
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
const struct in6_addr *sip, __be16 sport,
unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
struct adapter *adap;
struct cpl_pass_open_req6 *req;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
adap = netdev2adap(dev);
req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
req->local_port = sport;
req->peer_port = htons(0);
req->local_ip_hi = *(__be64 *)(sip->s6_addr);
req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
req->peer_ip_hi = cpu_to_be64(0);
req->peer_ip_lo = cpu_to_be64(0);
chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server6);
/**
* cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
* @mtus: the HW MTU table
* @mtu: the target MTU
* @idx: index of selected entry in the MTU table
*
* Returns the index and the value in the HW MTU table that is closest to
* but does not exceed @mtu, unless @mtu is smaller than any value in the
* table, in which case that smallest available value is selected.
*/
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx)
{
unsigned int i = 0;
while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
++i;
if (idx)
*idx = i;
return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
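/*
 * Example (sketch; "lldi" and "pmtu" are illustrative): selecting an MTU for
 * a connection from the HW table a ULD received in cxgb4_lld_info::mtus:
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(lldi->mtus, pmtu, &idx);
 *
 * "mtu" is the table value chosen (not exceeding pmtu unless pmtu is below
 * the whole table) and "idx" is the table index HW expects for that entry.
 */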
/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
* Return the HW Tx channel of the given port.
*/
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
/**
* cxgb4_port_viid - get the VI id of a port
* @dev: the net device for the port
*
* Return the VI id of the given port.
*/
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);
/**
* cxgb4_port_idx - get the index of a port
* @dev: the net device for the port
*
* Return the index of the given port.
*/
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
/**
* cxgb4_netdev_by_hwid - return the net device of a HW port
* @pdev: identifies the adapter
* @id: the HW port id
*
* Return the net device associated with the interface with the given HW
* id.
*/
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
{
const struct adapter *adap = pci_get_drvdata(pdev);
if (!adap || id >= NCHAN)
return NULL;
id = adap->chan_map[id];
return id < MAX_NPORTS ? adap->port[id] : NULL;
}
EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
struct adapter *adap = pci_get_drvdata(pdev);
spin_lock(&adap->stats_lock);
t4_tp_get_tcp_stats(adap, v4, v6);
spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
const unsigned int *pgsz_order)
{
struct adapter *adap = netdev2adap(dev);
t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
{
const struct device *parent;
const struct net_device *netdev = neigh->dev;
if (netdev->priv_flags & IFF_802_1Q_VLAN)
netdev = vlan_dev_real_dev(netdev);
parent = netdev->dev.parent;
if (parent && parent->driver == &cxgb4_driver.driver)
t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
switch (event) {
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
case NETEVENT_PMTU_UPDATE:
case NETEVENT_REDIRECT:
default:
break;
}
return 0;
}
static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
.notifier_call = netevent_cb
};
static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
/* assemble the lower-layer info the ULD needs to drive this adapter */
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
lli.vr = &adap->vres;
lli.mtus = adap->params.mtus;
if (uld == CXGB4_ULD_RDMA) {
lli.rxq_ids = adap->sge.rdma_rxq;
lli.nrxq = adap->sge.rdmaqs;
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.ofld_rxq;
lli.nrxq = adap->sge.ofldqsets;
}
lli.ntxq = adap->sge.ofldqsets;
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.rev;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
dev_warn(adap->pdev_dev,
"could not attach to the %s driver, error %ld\n",
uld_str[uld], PTR_ERR(handle));
return;
}
adap->uld_handle[uld] = handle;
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
}
static void attach_ulds(struct adapter *adap)
{
unsigned int i;
mutex_lock(&uld_mutex);
list_add_tail(&adap->list_node, &adapter_list);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (ulds[i].add)
uld_attach(adap, i);
mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
unsigned int i;
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld_handle[i]) {
ulds[i].state_change(adap->uld_handle[i],
CXGB4_STATE_DETACH);
adap->uld_handle[i] = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
unsigned int i;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld_handle[i])
ulds[i].state_change(adap->uld_handle[i], new_state);
mutex_unlock(&uld_mutex);
}
/**
* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
*
* Registers an upper-layer driver with this driver and notifies the ULD
* about any presently available devices that support its type. Returns
* %-EBUSY if a ULD of the same type is already registered.
*/
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
int ret = 0;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
if (ulds[type].add) {
ret = -EBUSY;
goto out;
}
ulds[type] = *p;
list_for_each_entry(adap, &adapter_list, list_node)
uld_attach(adap, type);
out: mutex_unlock(&uld_mutex);
return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
* cxgb4_unregister_uld - unregister an upper-layer driver
* @type: the ULD type
*
* Unregisters an existing upper-layer driver.
*/
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node)
adap->uld_handle[type] = NULL;
ulds[type].add = NULL;
mutex_unlock(&uld_mutex);
return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
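/*
 * Registration sketch for a hypothetical ULD ("my_*" names are illustrative;
 * the callback signatures match how uld_attach() and detach_ulds() invoke
 * them):
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		// remember lli->tids, lli->ports, lli->rxq_ids, ...
 *		return my_handle;		// or an ERR_PTR() on failure
 *	}
 *
 *	static int my_uld_state_change(void *handle, enum cxgb4_state state)
 *	{
 *		return 0;			// react to UP/DETACH/...
 *	}
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add = my_uld_add,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */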
/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
* the initialization of HW modules and enabling interrupts.
*
* Must be called with the rtnl lock held.
*/
static int cxgb_up(struct adapter *adap)
{
int err = 0;
if (!(adap->flags & FULL_INIT_DONE)) {
err = setup_sge_queues(adap);
if (err)
goto out;
err = setup_rss(adap);
if (err) {
t4_free_sge_resources(adap);
goto out;
}
if (adap->flags & USING_MSIX)
name_msix_vecs(adap);
adap->flags |= FULL_INIT_DONE;
}
if (adap->flags & USING_MSIX) {
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
adap->name, adap);
if (err)
goto irq_err;
}
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
notify_ulds(adap, CXGB4_STATE_UP);
out:
return err;
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
goto out;
}
static void cxgb_down(struct adapter *adapter)
{
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
adapter->tid_release_task_busy = false;
if (adapter->flags & USING_MSIX) {
free_msix_queue_irqs(adapter);
free_irq(adapter->msix_info[0].vec, adapter);
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
}
/*
* net_device operations
*/
static int cxgb_open(struct net_device *dev)
{
int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
return err;
dev->real_num_tx_queues = pi->nqsets;
set_bit(pi->tx_chan, &adapter->open_device_map);
link_start(dev);
netif_tx_start_all_queues(dev);
return 0;
}
static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
/* errors from disabling the VI are not actionable at this point */
t4_enable_vi(adapter, 0, pi->viid, false, false);
clear_bit(pi->tx_chan, &adapter->open_device_map);
if (!adapter->open_device_map)
cxgb_down(adapter);
return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
struct net_device_stats *ns = &dev->stats;
spin_lock(&adapter->stats_lock);
t4_get_port_stats(adapter, p->tx_chan, &stats);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
ns->tx_packets = stats.tx_frames;
ns->rx_bytes = stats.rx_octets;
ns->rx_packets = stats.rx_frames;
ns->multicast = stats.rx_mcast_frames;
/* detailed rx_errors */
ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
stats.rx_runt;
ns->rx_over_errors = 0;
ns->rx_crc_errors = stats.rx_fcs_err;
ns->rx_frame_errors = stats.rx_symbol_err;
ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
stats.rx_ovflow2 + stats.rx_ovflow3 +
stats.rx_trunc0 + stats.rx_trunc1 +
stats.rx_trunc2 + stats.rx_trunc3;
ns->rx_missed_errors = 0;
/* detailed tx_errors */
ns->tx_aborted_errors = 0;
ns->tx_carrier_errors = 0;
ns->tx_fifo_errors = 0;
ns->tx_heartbeat_errors = 0;
ns->tx_window_errors = 0;
ns->tx_errors = stats.tx_error_frames;
ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
case SIOCGMIIPHY:
if (pi->mdio_addr < 0)
return -EOPNOTSUPP;
data->phy_id = pi->mdio_addr;
break;
case SIOCGMIIREG:
case SIOCSMIIREG:
if (mdio_phy_id_is_c45(data->phy_id)) {
prtad = mdio_phy_id_prtad(data->phy_id);
devad = mdio_phy_id_devad(data->phy_id);
} else if (data->phy_id < 32) {
prtad = data->phy_id;
devad = 0;
data->reg_num &= 0x1f;
} else
return -EINVAL;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
data->reg_num, &data->val_out);
else
ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
data->reg_num, data->val_in);
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
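/*
 * Example (sketch): a clause-45 phy_id, as accepted above, is built with the
 * standard helper from <linux/mdio.h>; mdio_phy_id_prtad()/_devad() recover
 * the two fields again:
 *
 *	data->phy_id = mdio_phy_id_c45(prtad, devad);
 */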
static void cxgb_set_rxmode(struct net_device *dev)
{
/* unfortunately we can't return errors to the stack */
set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
struct port_info *pi = netdev_priv(dev);
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
true);
if (!ret)
dev->mtu = new_mtu;
return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
int ret;
struct sockaddr *addr = p;
struct port_info *pi = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
addr->sa_data, true, true);
if (ret < 0)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
pi->xact_addr_filt = ret;
return 0;
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
if (adap->flags & USING_MSIX) {
int i;
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
for (i = pi->nqsets; i; i--, rx++)
t4_sge_intr_msix(0, &rx->rspq);
} else
t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
.ndo_start_xmit = t4_eth_xmit,
.ndo_get_stats = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
t4_intr_disable(adap);
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
u32 bar0;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
(bar0 + MEMWIN0_BASE) | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
(bar0 + MEMWIN1_BASE) | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
(bar0 + MEMWIN2_BASE) | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
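/*
 * Note on the encoding above (a reading of the math, not a documented spec):
 * WINDOW() takes the aperture size as a power of two in 1KB units, so an
 * aperture of 2^(10 + N) bytes is programmed as WINDOW(N), e.g. a 64KB
 * window would be WINDOW(6).
 */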
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
#define MAX_ATIDS 8192U
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
static int adap_init0(struct adapter *adap)
{
int ret;
u32 v, port_vec;
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd c;
ret = t4_check_fw_version(adap);
if (ret == -EINVAL || ret > 0) {
if (upgrade_fw(adap) >= 0) /* recache FW version */
ret = t4_check_fw_version(adap);
}
if (ret < 0)
return ret;
/* contact FW, request master */
ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
return ret;
}
/* reset device */
ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
if (ret < 0)
goto bye;
/* get device capabilities */
memset(&c, 0, sizeof(c));
c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
c.retval_len16 = htonl(FW_LEN16(c));
ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
if (ret < 0)
goto bye;
/* select capabilities we'll be using */
if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
if (!vf_acls)
c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
else
c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
} else if (vf_acls) {
dev_err(adap->pdev_dev, "virtualization ACLs not supported");
goto bye;
}
c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
if (ret < 0)
goto bye;
ret = t4_config_glbl_rss(adap, 0,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
if (ret < 0)
goto bye;
ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
FW_CMD_CAP_PF, FW_CMD_CAP_PF);
if (ret < 0)
goto bye;
for (v = 0; v < SGE_NTIMERS - 1; v++)
adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
adap->sge.counter_val[0] = 1;
for (v = 1; v < SGE_NCOUNTERS; v++)
adap->sge.counter_val[v] = min(intr_cnt[v - 1],
THRESHOLD_3_MASK);
t4_sge_init(adap);
/* get basic stuff going */
ret = t4_early_init(adap, 0);
if (ret < 0)
goto bye;
#define FW_PARAM_DEV(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
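/*
 * For reference: these macros build the 32-bit FW parameter ids that
 * t4_query_params() takes; val[] is filled positionally, so e.g. with
 * params[0] = FW_PARAM_DEV(PORTVEC) the port vector comes back in val[0],
 * as used below.
 */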
params[0] = FW_PARAM_DEV(PORTVEC);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
if (ret < 0)
goto bye;
port_vec = val[0];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
if (c.ofldcaps) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
params[2] = FW_PARAM_PFVF(SERVER_END);
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->tids.ntids = val[0];
adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
adap->tids.stid_base = val[1];
adap->tids.nstids = val[2] - val[1] + 1;
adap->vres.ddp.start = val[3];
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
}
if (c.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
params[1] = FW_PARAM_PFVF(STAG_END);
params[2] = FW_PARAM_PFVF(RQ_START);
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->vres.stag.start = val[0];
adap->vres.stag.size = val[1] - val[0] + 1;
adap->vres.rq.start = val[2];
adap->vres.rq.size = val[3] - val[2] + 1;
adap->vres.pbl.start = val[4];
adap->vres.pbl.size = val[5] - val[4] + 1;
}
if (c.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
adap->params.nports = hweight32(port_vec);
adap->params.portvec = port_vec;
adap->flags |= FW_OK;
/* These are finalized by FW initialization, load their values now */
v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
adap->params.tp.tre = TIMERRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
/* tweak some settings */
t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
setup_memwin(adap);
return 0;
/*
* If a command timed out or failed with EIO, the FW is not operating
* within its spec or something catastrophic happened to the HW/FW;
* stop issuing further commands.
*/
bye: if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, 0);
return ret;
}
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
unsigned int size, unsigned int iqe_size)
{
q->intr_params = QINTR_TIMER_IDX(timer_idx) |
(pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
q->iqe_len = iqe_size;
q->size = size;
}
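/*
 * Example of the encoding (taken from how fw_evtq is configured below):
 * init_rspq(&q, 6, 0, 512, 64) asks for holdoff timer index 6, enables the
 * packet-count threshold at index 0 (0 < SGE_NCOUNTERS), and sizes the queue
 * at 512 entries of 64 bytes each.
 */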
/*
* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static void __devinit cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to one queue set per non-10G port and up to one queue set
* per online CPU core for each 10G port.
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
if (q10g > num_online_cpus())
q10g = num_online_cpus();
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
if (is_offload(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
i = min_t(int, ARRAY_SIZE(s->ofldrxq),
num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
init_rspq(&r->rspq, 0, 0, 1024, 64);
r->fl.size = 72;
}
for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
s->ethtxq[i].q.size = 1024;
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
s->ctrlq[i].q.size = 512;
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
struct sge_ofld_rxq *r = &s->ofldrxq[i];
init_rspq(&r->rspq, 0, 0, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
r->fl.size = 72;
}
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
init_rspq(&r->rspq, 0, 0, 511, 64);
r->rspq.uld = CXGB4_ULD_RDMA;
r->fl.size = 72;
}
init_rspq(&s->fw_evtq, 6, 0, 512, 64);
init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
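/*
 * Worked example of the split above (hypothetical configuration): with two
 * 10G and two 1G ports, n10g = 2 and q10g = (MAX_ETH_QSETS - 2) / 2, capped
 * at num_online_cpus(); each 10G port then gets q10g Ethernet queue sets and
 * each 1G port gets one, for s->ethqsets = 2 * q10g + 2 in total.
 */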