mirror of the now-defunct rocklinux.org
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

130 lines
4.9 KiB

  1. # --- ROCK-COPYRIGHT-NOTE-BEGIN ---
  2. #
  3. # This copyright note is auto-generated by ./scripts/Create-CopyPatch.
  4. # Please add additional copyright information _after_ the line containing
  5. # the ROCK-COPYRIGHT-NOTE-END tag. Otherwise it might get removed by
  6. # the ./scripts/Create-CopyPatch script. Do not edit this copyright text!
  7. #
  8. # ROCK Linux: rock-src/package/base/linux/linux24/01-linux-2.4-seg-4.patch
  9. # ROCK Linux is Copyright (C) 1998 - 2006 Clifford Wolf
  10. #
  11. # This patch file is dual-licensed. It is available under the license the
  12. # patched project is licensed under, as long as it is an OpenSource license
  13. # as defined at http://www.opensource.org/ (e.g. BSD, X11) or under the terms
  14. # of the GNU General Public License as published by the Free Software
  15. # Foundation; either version 2 of the License, or (at your option) any later
  16. # version.
  17. #
  18. # --- ROCK-COPYRIGHT-NOTE-END ---
  19. --- linux/arch/i386/kernel/apm.c.seg 2005-03-27 13:10:45.000000000 -0800
  20. +++ linux/arch/i386/kernel/apm.c 2005-03-28 10:30:24.000000000 -0800
  21. @@ -327,7 +327,7 @@ extern int (*console_blank_hook)(int);
  22. * Save a segment register away
  23. */
  24. #define savesegment(seg, where) \
  25. - __asm__ __volatile__("movl %%" #seg ",%0" : "=m" (where))
  26. + __asm__ __volatile__("mov %%" #seg ",%0" : "=m" (where))
  27. /*
  28. * Maximum number of events stored
  29. @@ -553,7 +553,7 @@ static inline void apm_restore_cpus(unsi
  30. #ifdef APM_ZERO_SEGS
  31. # define APM_DECL_SEGS \
  32. - unsigned int saved_fs; unsigned int saved_gs;
  33. + unsigned short saved_fs; unsigned short saved_gs;
  34. # define APM_DO_SAVE_SEGS \
  35. savesegment(fs, saved_fs); savesegment(gs, saved_gs)
  36. # define APM_DO_ZERO_SEGS \
  37. --- linux/arch/i386/kernel/process.c.seg 2005-03-27 13:10:45.000000000 -0800
  38. +++ linux/arch/i386/kernel/process.c 2005-03-28 10:30:24.000000000 -0800
  39. @@ -544,7 +544,7 @@ void release_thread(struct task_struct *
  40. * Save a segment.
  41. */
  42. #define savesegment(seg,value) \
  43. - asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
  44. + asm volatile("mov %%" #seg ",%0":"=m" (value))
  45. int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
  46. unsigned long unused,
  47. @@ -661,8 +661,8 @@ void fastcall __switch_to(struct task_st
  48. * Save away %fs and %gs. No need to save %es and %ds, as
  49. * those are always kernel segments while inside the kernel.
  50. */
  51. - asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
  52. - asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
  53. + asm volatile("mov %%fs,%0":"=m" (prev->fs));
  54. + asm volatile("mov %%gs,%0":"=m" (prev->gs));
  55. /*
  56. * Restore %fs and %gs.
  57. --- linux/arch/x86_64/kernel/process.c.seg 2005-03-27 13:10:51.000000000 -0800
  58. +++ linux/arch/x86_64/kernel/process.c 2005-03-28 11:16:57.000000000 -0800
  59. @@ -527,10 +527,10 @@ int copy_thread(int nr, unsigned long cl
  60. p->thread.fs = me->thread.fs;
  61. p->thread.gs = me->thread.gs;
  62. - asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
  63. - asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
  64. - asm("movl %%es,%0" : "=m" (p->thread.es));
  65. - asm("movl %%ds,%0" : "=m" (p->thread.ds));
  66. + asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
  67. + asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
  68. + asm("mov %%es,%0" : "=m" (p->thread.es));
  69. + asm("mov %%ds,%0" : "=m" (p->thread.ds));
  70. unlazy_fpu(current);
  71. p->thread.i387 = current->thread.i387;
  72. @@ -575,11 +575,11 @@ struct task_struct *__switch_to(struct t
  73. /*
  74. * Switch DS and ES.
  75. */
  76. - asm volatile("movl %%es,%0" : "=m" (prev->es));
  77. + asm volatile("mov %%es,%0" : "=m" (prev->es));
  78. if (unlikely(next->es | prev->es))
  79. loadsegment(es, next->es);
  80. - asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
  81. + asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
  82. if (unlikely(next->ds | prev->ds))
  83. loadsegment(ds, next->ds);
  84. @@ -588,7 +588,7 @@ struct task_struct *__switch_to(struct t
  85. */
  86. {
  87. unsigned fsindex;
  88. - asm volatile("movl %%fs,%0" : "=g" (fsindex));
  89. + asm volatile("movl %%fs,%0" : "=r" (fsindex));
  90. /* segment register != 0 always requires a reload.
  91. also reload when it has changed.
  92. when prev process used 64bit base always reload
  93. @@ -609,7 +609,7 @@ struct task_struct *__switch_to(struct t
  94. }
  95. {
  96. unsigned gsindex;
  97. - asm volatile("movl %%gs,%0" : "=g" (gsindex));
  98. + asm volatile("movl %%gs,%0" : "=r" (gsindex));
  99. if (unlikely((gsindex | next->gsindex) || prev->gs)) {
  100. load_gs_index(next->gsindex);
  101. if (gsindex)
  102. --- linux/include/asm-i386/system.h.seg 2005-03-27 15:33:12.000000000 -0800
  103. +++ linux/include/asm-i386/system.h 2005-03-28 10:30:24.000000000 -0800
  104. @@ -84,7 +84,7 @@ static inline unsigned long _get_base(ch
  105. #define loadsegment(seg,value) \
  106. asm volatile("\n" \
  107. "1:\t" \
  108. - "movl %0,%%" #seg "\n" \
  109. + "mov %0,%%" #seg "\n" \
  110. "2:\n" \
  111. ".section .fixup,\"ax\"\n" \
  112. "3:\t" \
  113. @@ -96,7 +96,7 @@ static inline unsigned long _get_base(ch
  114. ".align 4\n\t" \
  115. ".long 1b,3b\n" \
  116. ".previous" \
  117. - : :"m" (*(unsigned int *)&(value)))
  118. + : :"m" (value))
  119. /*
  120. * Clear and set 'TS' bit respectively