linux/include/net/pkt_sched.h
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <net/sch_generic.h>

struct qdisc_walker
{
        int     stop;
        int     skip;
        int     count;
        int     (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

extern rwlock_t qdisc_tree_lock;

#define QDISC_ALIGNTO           32
#define QDISC_ALIGN(len)        (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
        return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}

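/*
 * Illustrative sketch (not part of the original header): a qdisc keeps its
 * private state in the memory the qdisc core allocates directly behind
 * struct Qdisc, aligned to QDISC_ALIGNTO, and reaches it through
 * qdisc_priv().  The structure and init callback below are hypothetical.
 */
#if 0
struct example_sched_data {
        u32     limit;                  /* queue length limit, in packets */
};

static int example_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct example_sched_data *q = qdisc_priv(sch);

        q->limit = 1000;                /* arbitrary illustrative default */
        return 0;
}
#endif
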
/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because in the most critical places we
   can use an artificial clock evaluated by integration of the network
   data flow.

   Note: we do not use fastgettimeofday.
   The reason is that, when it is not the same thing as gettimeofday,
   it returns an invalid timestamp that is not updated while net_bh
   is active.
 */
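
/*
 * Worked example of the rule above: at 100 Mbit/sec a 512-byte packet
 * occupies the wire for 4096 bits / 100 Mbit/sec ~= 41 usec, so the
 * scheduling clock would need to resolve roughly 4 usec or better.
 */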

/* General note about the internal clock.

   Any clock source returns time intervals measured in units close to
   1 usec.  With the CONFIG_NET_SCH_CLK_GETTIMEOFDAY source it is precisely
   microseconds; otherwise it is something close, chosen to minimize
   arithmetic cost.  The usec/internal-units ratio, in the form
   numerator/denominator, may be read from /proc/net/psched.
 */


#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY

typedef struct timeval  psched_time_t;
typedef long            psched_tdiff_t;

#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs)
#define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay)

#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

typedef u64     psched_time_t;
typedef long    psched_tdiff_t;

#ifdef CONFIG_NET_SCH_CLK_JIFFIES

#if HZ < 96
#define PSCHED_JSCALE 14
#elif HZ >= 96 && HZ < 192
#define PSCHED_JSCALE 13
#elif HZ >= 192 && HZ < 384
#define PSCHED_JSCALE 12
#elif HZ >= 384 && HZ < 768
#define PSCHED_JSCALE 11
#elif HZ >= 768
#define PSCHED_JSCALE 10
#endif

#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
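
/*
 * Example of the scaling above: with HZ=100, PSCHED_JSCALE is 13, so one
 * jiffy (10000 usec) becomes 1<<13 = 8192 internal units, i.e. one unit is
 * roughly 1.22 usec.  Note that with this clock source the "delay" arguments
 * of PSCHED_US2JIFFIE/PSCHED_JIFFIE2US are expressed in these internal
 * units, which are only approximately microseconds.
 */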

#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
#ifdef CONFIG_NET_SCH_CLK_CPU
#include <asm/timex.h>

extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
extern psched_time_t psched_time_base;
extern cycles_t psched_time_mark;

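/*
 * On architectures where cycles_t is only 32 bits wide the raw cycle counter
 * wraps around; PSCHED_GET_TIME() detects a wrap (the new reading is not
 * above the previously recorded psched_time_mark) and compensates by adding
 * 2^32 to psched_time_base before shifting the sum down by
 * psched_clock_scale into roughly-1-usec units.  With a 64-bit counter only
 * the shift is needed.
 */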
#define PSCHED_GET_TIME(stamp)                                          \
do {                                                                    \
        cycles_t cur = get_cycles();                                    \
        if (sizeof(cycles_t) == sizeof(u32)) {                          \
                if (cur <= psched_time_mark)                            \
                        psched_time_base += 0x100000000ULL;             \
                psched_time_mark = cur;                                 \
                (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
        } else {                                                        \
                (stamp) = cur>>psched_clock_scale;                      \
        }                                                               \
} while (0)
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)

#endif /* CONFIG_NET_SCH_CLK_CPU */

#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
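/*
 * The switch statements in PSCHED_TDIFF() and PSCHED_TDIFF_SAFE() below rely
 * on intentional fall-through: each whole second of difference adds one more
 * USEC_PER_SEC to the microsecond delta, while larger (or negative) second
 * differences hit the default label, which clamps the result (to about two
 * seconds in PSCHED_TDIFF(), and via psched_tod_diff() to the caller-supplied
 * bound in PSCHED_TDIFF_SAFE()).
 */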
#define PSCHED_TDIFF(tv1, tv2) \
({ \
           int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
           int __delta = (tv1).tv_usec - (tv2).tv_usec; \
           if (__delta_sec) { \
                   switch (__delta_sec) { \
                   default: \
                           __delta = 0; \
                   case 2: \
                           __delta += USEC_PER_SEC; \
                   case 1: \
                           __delta += USEC_PER_SEC; \
                   } \
           } \
           __delta; \
})

static inline int
psched_tod_diff(int delta_sec, int bound)
{
        int delta;

        if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1)
                return bound;
        delta = delta_sec * USEC_PER_SEC;
        if (delta > bound || delta < 0)
                delta = bound;
        return delta;
}

#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
({ \
           int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
           int __delta = (tv1).tv_usec - (tv2).tv_usec; \
           switch (__delta_sec) { \
           default: \
                   __delta = psched_tod_diff(__delta_sec, bound);  break; \
           case 2: \
                   __delta += USEC_PER_SEC; \
           case 1: \
                   __delta += USEC_PER_SEC; \
           case 0: \
                   if (__delta > bound || __delta < 0) \
                        __delta = bound; \
           } \
           __delta; \
})

#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
                                (tv1).tv_sec <= (tv2).tv_sec) || \
                                 (tv1).tv_sec < (tv2).tv_sec)

#define PSCHED_TADD2(tv, delta, tv_res) \
({ \
           int __delta = (tv).tv_usec + (delta); \
           (tv_res).tv_sec = (tv).tv_sec; \
           while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
           (tv_res).tv_usec = __delta; \
})

#define PSCHED_TADD(tv, delta) \
({ \
           (tv).tv_usec += (delta); \
           while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \
                 (tv).tv_usec -= USEC_PER_SEC; } \
})

/* Set/check that a time is in the "past perfect";
   how this is done depends on the concrete representation of system time.
 */

#define PSCHED_SET_PASTPERFECT(t)       ((t).tv_sec = 0)
#define PSCHED_IS_PASTPERFECT(t)        ((t).tv_sec == 0)

#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })

#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
        min_t(long long, (tv1) - (tv2), bound)


#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
#define PSCHED_TADD(tv, delta) ((tv) += (delta))
#define PSCHED_SET_PASTPERFECT(t)       ((t) = 0)
#define PSCHED_IS_PASTPERFECT(t)        ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)

#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

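/*
 * Illustrative sketch (not part of the original header): rate-limiting
 * qdiscs typically time-stamp the queue and later bound the elapsed time
 * with the macros above, token-bucket style.  The structure and field names
 * below are hypothetical.
 */
#if 0
struct example_rate_data {
        psched_time_t   t_c;            /* time of the last token update */
        long            buffer;         /* burst allowance, internal units */
        long            tokens;         /* currently accumulated credit */
};

static void example_update_tokens(struct example_rate_data *q)
{
        psched_time_t now;
        long toks;

        PSCHED_GET_TIME(now);
        /* Elapsed internal time since t_c, clamped to the burst allowance. */
        toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);
        q->t_c = now;
        /* Accumulate credit, capped at the burst size. */
        q->tokens = min_t(long, q->tokens + toks, q->buffer);
}
#endif
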
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;

extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                struct rtattr *tab);
extern void qdisc_put_rtab(struct qdisc_rate_table *tab);

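/*
 * Illustrative sketch (not part of the original header): a qdisc module
 * normally registers its Qdisc_ops from module init and unregisters it on
 * exit.  "example_qdisc_ops" is a hypothetical ops structure.
 */
#if 0
static int __init example_module_init(void)
{
        return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
        unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
#endif
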
extern void __qdisc_run(struct net_device *dev);

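/*
 * The __LINK_STATE_QDISC_RUNNING bit serializes execution of __qdisc_run():
 * the first caller to set the bit drains the device queue, while concurrent
 * callers on other CPUs find the bit already set and return, having only
 * enqueued their packets.  Nothing runs while the driver has stopped the
 * queue.
 */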
static inline void qdisc_run(struct net_device *dev)
{
        if (!netif_queue_stopped(dev) &&
            !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
                __qdisc_run(dev);
}

extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
        struct tcf_result *res);
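
/*
 * Illustrative sketch (not part of the original header): a classful qdisc
 * typically runs its attached filter chain through tc_classify() in the
 * enqueue path and uses the returned classid to pick a class.  The data
 * structure and field names below are hypothetical; exact handling of the
 * verdict depends on the configuration (e.g. CONFIG_NET_CLS_ACT).
 */
#if 0
struct example_class_data {
        struct tcf_proto        *filter_list;   /* attached classifier chain */
};

static u32 example_classify(struct sk_buff *skb, struct Qdisc *sch)
{
        struct example_class_data *q = qdisc_priv(sch);
        struct tcf_result res;

        if (tc_classify(skb, q->filter_list, &res) >= 0)
                return res.classid;
        return 0;       /* no match: let the caller fall back to a default */
}
#endif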

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned psched_mtu(struct net_device *dev)
{
        unsigned mtu = dev->mtu;
        return dev->hard_header ? mtu + dev->hard_header_len : mtu;
}
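
/*
 * Worked example: for a typical Ethernet device with mtu 1500 and
 * hard_header_len 14, psched_mtu() returns 1514, i.e. the MTU plus the
 * link-layer header.
 */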

#endif