VSF Documented
skbuff.h
Go to the documentation of this file.
1#ifndef __VSF_LINUX_SKBUFF_H__
2#define __VSF_LINUX_SKBUFF_H__
3
4#include <linux/types.h>
5#include <linux/gfp.h>
6#include <linux/list.h>
7#include <linux/llist.h>
8#include <linux/refcount.h>
9#include <linux/spinlock.h>
10
11#ifdef __cplusplus
12extern "C" {
13#endif
14
15typedef unsigned int sk_buff_data_t;
16
20};
21
22struct sk_buff {
23 union {
24 struct {
25 struct sk_buff *next;
26 struct sk_buff *prev;
27 };
30 };
31
32 char cb[48];
33
34 unsigned int len, data_len;
37 unsigned char *head, *data;
39};
40
/*
 * Bare next/prev pair that overlays the linkage at the head of
 * struct sk_buff, so queue code can treat heads and buffers alike.
 * NOTE(review): opening line restored from this header's symbol index.
 */
struct sk_buff_list {
    struct sk_buff *next;
    struct sk_buff *prev;
};
45
47 union {
48 struct {
49 struct sk_buff *next;
50 struct sk_buff *prev;
51 };
53 };
56};
57
/* Allocate an sk_buff with a data buffer of at least @size bytes. */
struct sk_buff * alloc_skb(unsigned int size, gfp_t priority);
59
60static inline bool skb_unref(struct sk_buff *skb)
61{
62 return refcount_dec_and_test(&skb->users);
63}
64
65static inline void skb_reserve(struct sk_buff *skb, int len)
66{
67 skb->data += len;
68 skb->tail += len;
69}
70
71static inline unsigned char * skb_tail_pointer(const struct sk_buff *skb)
72{
73 return skb->head + skb->tail;
74}
75
76static inline void skb_reset_tail_pointer(struct sk_buff *skb)
77{
78 skb->tail = skb->data - skb->head;
79}
80
81static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
82{
83 skb_reset_tail_pointer(skb);
84 skb->tail += offset;
85}
86
87static inline unsigned char * skb_end_pointer(const struct sk_buff *skb)
88{
89 return skb->head + skb->end;
90}
91
92static inline unsigned int skb_end_offset(const struct sk_buff *skb)
93{
94 return skb->end;
95}
96
97static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
98{
99 skb->end = offset;
100}
101
102static inline void * skb_put(struct sk_buff *skb, int len)
103{
104 void *tmp = skb_tail_pointer(skb);
105 skb->tail += len;
106 skb->len += len;
107 return tmp;
108}
109
/* Append @len zeroed bytes; returns the start of the new area. */
static inline void * skb_put_zero(struct sk_buff *skb, unsigned int len)
{
    /* memset returns its destination, which is exactly what we hand back */
    return memset(skb_put(skb, len), 0, len);
}
116
/* Append @len bytes copied from @data; returns where they were placed. */
static inline void * skb_put_data(struct sk_buff *skb, const void *data, unsigned int len)
{
    /* memcpy returns its destination, which is exactly what we hand back */
    return memcpy(skb_put(skb, len), data, len);
}
123
124static inline void skb_put_u8(struct sk_buff *skb, u8 val)
125{
126 *(u8 *)skb_put(skb, 1) = val;
127}
128
129static inline void * skb_push(struct sk_buff *skb, unsigned int len)
130{
131 skb->data -= len;
132 skb->len += len;
133 return skb->data;
134}
135
136static inline void * skb_pull(struct sk_buff *skb, unsigned int len)
137{
138 skb->len -= len;
139 return skb->data += len;
140}
141
142static inline struct sk_buff * skb_get(struct sk_buff *skb)
143{
144 refcount_inc(&skb->users);
145 return skb;
146}
147
148static inline unsigned int skb_headroom(const struct sk_buff *skb)
149{
150 return skb->data - skb->head;
151}
152
153static inline int skb_tailroom(const struct sk_buff *skb)
154{
155 return skb->end - skb->tail;
156}
157
158static inline void skb_trim(struct sk_buff *skb, unsigned int len)
159{
160 if (skb->len > len) {
161 skb->len = len;
162 skb_set_tail_pointer(skb, len);
163 }
164}
165
166static inline void skb_queue_head_init(struct sk_buff_head *list)
167{
168 spin_lock_init(&list->lock);
169 list->prev = list->next = (struct sk_buff *)list;
170 list->qlen = 0;
171}
172
173static inline int skb_queue_empty(const struct sk_buff_head *list)
174{
175 return list->next == (const struct sk_buff *)list;
176}
177
178static inline bool skb_queue_is_last(const struct sk_buff_head *list, const struct sk_buff *skb)
179{
180 return skb->next == (const struct sk_buff *)list;
181}
182
183static inline bool skb_queue_is_first(const struct sk_buff_head *list, const struct sk_buff *skb)
184{
185 return skb->prev == (const struct sk_buff *) list;
186}
187
188static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, const struct sk_buff *skb)
189{
190 return skb->next;
191}
192
193static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, const struct sk_buff *skb)
194{
195 return skb->prev;
196}
197
198static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list)
199{
200 newsk->next = next;
201 newsk->prev = prev;
202 next->prev = newsk;
203 prev->next = newsk;
204 list->qlen++;
205}
206static inline void __skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk)
207{
208 __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
209}
210static inline void __skb_queue_before(struct sk_buff_head *list, struct sk_buff *next, struct sk_buff *newsk)
211{
212 __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
213}
214
215static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
216{
217 struct sk_buff *next, *prev;
218
219 list->qlen--;
220 next = skb->next;
221 prev = skb->prev;
222 skb->next = skb->prev = NULL;
223 next->prev = prev;
224 prev->next = next;
225}
226static inline void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
227{
228 unsigned long flags;
229 spin_lock_irqsave(&list->lock, flags);
230 __skb_unlink(skb, list);
231 spin_unlock_irqrestore(&list->lock, flags);
232}
233
234static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
235{
236 unsigned long flags;
237 spin_lock_irqsave(&list->lock, flags);
238 __skb_queue_after(list, old, newsk);
239 spin_unlock_irqrestore(&list->lock, flags);
240}
241
242static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
243{
244 unsigned long flags;
245 spin_lock_irqsave(&list->lock, flags);
246 __skb_queue_after(list, (struct sk_buff *)list, newsk);
247 spin_unlock_irqrestore(&list->lock, flags);
248}
249
250static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
251{
252 unsigned long flags;
253 spin_lock_irqsave(&list->lock, flags);
254 __skb_queue_before(list, (struct sk_buff *)list, newsk);
255 spin_unlock_irqrestore(&list->lock, flags);
256}
257
258static inline struct sk_buff *skb_peek(const struct sk_buff_head *list)
259{
260 struct sk_buff *skb = list->next;
261 if (skb == (struct sk_buff *)list) {
262 skb = NULL;
263 }
264 return skb;
265}
266static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list)
267{
268 struct sk_buff *skb = list->prev;
269 if (skb == (struct sk_buff *)list) {
270 skb = NULL;
271 }
272 return skb;
273}
274
275static inline struct sk_buff * skb_dequeue(struct sk_buff_head *list)
276{
277 unsigned long flags;
278 struct sk_buff *skb;
279 spin_lock_irqsave(&list->lock, flags);
280 skb = skb_peek(list);
281 if (skb != NULL) {
282 __skb_unlink(skb, list);
283 }
284 spin_unlock_irqrestore(&list->lock, flags);
285 return skb;
286}
287static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
288{
289 unsigned long flags;
290 struct sk_buff *skb;
291 spin_lock_irqsave(&list->lock, flags);
292 skb = skb_peek_tail(list);
293 if (skb != NULL) {
294 __skb_unlink(skb, list);
295 }
296 spin_unlock_irqrestore(&list->lock, flags);
297 return skb;
298}
299
/* Free an sk_buff and its data buffer (defined in vsf_linux_core.c). */
extern void kfree_skb(struct sk_buff *skb);
/* In this port, normal consumption is the same as freeing. */
static inline void consume_skb(struct sk_buff *skb)
{
    kfree_skb(skb);
}
305
/* Driver-facing free helpers: all collapse to consume_skb() in this port. */
#define dev_kfree_skb(__skb) consume_skb(__skb)
#define dev_kfree_skb_any(__skb) dev_kfree_skb(__skb)
#define dev_consume_skb_any(__skb) consume_skb(__skb)
/* Allocate a buffer usable from atomic context (GFP_ATOMIC). */
static inline struct sk_buff * dev_alloc_skb(unsigned int length)
{
    return alloc_skb(length, GFP_ATOMIC);
}
313
314extern void skb_init(void);
315
316#ifdef __cplusplus
317}
318#endif
319
320#endif
unsigned int gfp_t
Definition gfp.h:10
#define GFP_ATOMIC
Definition gfp.h:15
struct ieee80211_ext_chansw_ie data
Definition ieee80211.h:80
#define spin_lock_init(lock)
Definition spinlock.h:29
#define spin_unlock_irqrestore(lock, flags)
Definition spinlock.h:38
int spinlock_t
Definition spinlock.h:26
#define spin_lock_irqsave(lock, flags)
Definition spinlock.h:37
__u8 u8
Definition types.h:69
uint32_t __u32
Definition types.h:55
uint8_t __u8
Definition types.h:51
#define NULL
Definition stddef.h:52
unsigned int sk_buff_data_t
Definition skbuff.h:15
void skb_init(void)
Definition vsf_linux_core.c:947
struct sk_buff * alloc_skb(unsigned int size, gfp_t priority)
Definition vsf_linux_core.c:884
void kfree_skb(struct sk_buff *skb)
Definition vsf_linux_core.c:930
void * memset(void *s, int ch, size_t n)
void * memcpy(void *dest, const void *src, size_t n)
Definition atomic.h:23
Definition list.h:77
struct list_head * prev
Definition list.h:78
struct list_head * next
Definition list.h:78
Definition llist.h:10
Definition refcount.h:11
Definition skbuff.h:46
spinlock_t lock
Definition skbuff.h:55
struct sk_buff_list list
Definition skbuff.h:52
__u32 qlen
Definition skbuff.h:54
struct sk_buff * next
Definition skbuff.h:49
struct sk_buff * prev
Definition skbuff.h:50
Definition skbuff.h:41
struct sk_buff * next
Definition skbuff.h:42
struct sk_buff * prev
Definition skbuff.h:43
Definition skbuff.h:22
unsigned char * head
Definition skbuff.h:37
struct sk_buff * prev
Definition skbuff.h:26
struct sk_buff * next
Definition skbuff.h:25
unsigned char * data
Definition skbuff.h:37
char cb[48]
Definition skbuff.h:32
struct llist_node ll_node
Definition skbuff.h:29
sk_buff_data_t tail
Definition skbuff.h:35
refcount_t users
Definition skbuff.h:38
sk_buff_data_t end
Definition skbuff.h:36
unsigned int len
Definition skbuff.h:34
struct list_head list
Definition skbuff.h:28
unsigned int data_len
Definition skbuff.h:34
Definition skbuff.h:17
__u8 flags
Definition skbuff.h:18
atomic_t dataref
Definition skbuff.h:19
uint64_t offset
Definition vsf_memfs.h:49
uint32_t size
Definition vsf_memfs.h:50
vsf_msgt_node_offset_t next
Definition vsf_msg_tree.h:192
uint_fast8_t length
Definition vsf_pbuf.c:38