static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}
static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}
static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}
extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_MASK;
	if (sk_can_gso(sk)) {
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
						copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}
/*
 * Queue a received datagram if it will fit. Stream and sequenced
 * protocols can't normally use this as they need to fit buffers in
 * and play with them.
 *
 * Inlined as it's very short and called for pretty much every
 * packet ever received.
 */
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}
extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
unsigned long expires);
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
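/*
 * Illustrative sketch only (not part of this header): per the "Queue a
 * received datagram" comment above, a datagram protocol's receive path would
 * typically hand a fully built skb to the owning socket through
 * sock_queue_rcv_skb(). The helper name "example_dgram_queue" and its error
 * handling are hypothetical.
 */
static inline int example_dgram_queue(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);	/* charges sk_rmem_alloc, wakes readers */

	if (err < 0)
		kfree_skb(skb);			/* e.g. the receive buffer was full */
	return err;
}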
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
/*
 * Recover an error report and clear atomically
 */
static inline int sock_error(struct sock *sk)
{
	int err;

	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}
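/*
 * Illustrative sketch only (not part of this header): blocking send/receive
 * loops commonly check sock_error() first and bail out with the pending,
 * already-negated errno. "example_check_pending_error" is a hypothetical name.
 */
static inline int example_check_pending_error(struct sock *sk)
{
	int err = sock_error(sk);	/* atomically reads and clears sk->sk_err */

	if (err)
		return err;		/* e.g. -ECONNRESET reported asynchronously */
	return 0;
}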
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}
#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size, gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}
#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*from STCP for fast SACK Process*/
#define sk_stream_for_retrans_queue_from(skb, sk)			\
		for (; (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)
/*
 * Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
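/*
 * Illustrative sketch only (not part of this header): per the comment above,
 * a poll() implementation would typically translate sock_writeable() into
 * POLLOUT bits roughly like this. "example_poll_out_mask" is a hypothetical
 * helper and assumes <linux/poll.h> is available.
 */
static inline unsigned int example_poll_out_mask(struct sock *sk)
{
	unsigned int mask = 0;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM;	/* writable without blocking */
	return mask;
}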
static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
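/*
 * Illustrative sketch only (not part of this header): receive paths commonly
 * derive the blocking timeout from MSG_DONTWAIT via sock_rcvtimeo() and, if a
 * signal interrupted the wait, map it back to an errno with sock_intr_errno().
 * "example_recv_wait_setup" is a hypothetical helper name.
 */
static inline int example_recv_wait_setup(struct sock *sk, int flags, long *timeo)
{
	*timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	if (signal_pending(current))
		return sock_intr_errno(*timeo);	/* -ERESTARTSYS or -EINTR */
	return 0;
}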
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving. Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}
/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif
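/*
 * Illustrative sketch only (not part of this header): as the kernel-doc above
 * notes, a recvmsg() loop calls sk_eat_skb() under the socket lock once an skb
 * has been fully copied to user space. "example_consume_skb" is hypothetical
 * and passes copied_early = 0 (no early DMA copy).
 */
static inline void example_consume_skb(struct sock *sk, struct sk_buff *skb)
{
	/* caller holds lock_sock(sk); skb sits on sk->sk_receive_queue */
	sk_eat_skb(sk, skb, 0);
}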
extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
/*
 * Enable debug/info messages
 */
#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...) printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...) do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif
/*
 * Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */
#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif
extern void sk_init(void);
#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;