projects / fglrx-packaging.git / commitdiff
Build Results
 
Summary

Description: Unnamed repository; edit this file 'description' to name the repository.
Last Change: Tue 10/14/14 4:44

Recent Commits
Time
Signed-Off By
Description
Commit Diff
Mon 10/13/14 11:22
Alberto Milone  
Ubuntu: avoid... 
Thu 9/11/14 5:59
Alberto Milone  
Ubuntu: drop link to... 
Fri 9/5/14 9:45
Alberto Milone  
Ubuntu: make sure it's... 
Mon 9/1/14 8:07
Alberto Milone  
Ubuntu: create a symlink... 
Mon 8/25/14 9:09
Alberto Milone  
Ubuntu: do not generate... 
Thu 8/21/14 3:56
Alberto Milone  
Ubuntu:... 
 
> --git a/Ubuntu/dists/intrepid/patches/02_2.6.26_support.patch b/Ubuntu/dists/intrepid/patches/02_2.6.26_support.patch
new file mode 100644
index 0000000..317d1da
--- /dev/null
+++ b/Ubuntu/dists/intrepid/patches/02_2.6.26_support.patch
@@ -0,+1,1769 @@
+
#This patch enables 2.6.26 support until AMD officially supports it.
+#It is originally by   Zilvina Valinskas <zilvinas@ilibox.com>
+#and adapted for AMD's 8-6 driver by Mario Limonciello <Mario_Limonciello@Dell.com>
+
+
diff -Nur -'*.orig' -'*~' fglrx-installer-8.501/lib/modules/fglrx/build_mod/firegl_public.c fglrx-installer-8.501.new/lib/modules/fglrx/build_mod/firegl_public.c
+--- fglrx-installer-8.501/lib/modules/fglrx/build_mod/firegl_public.c    2008-06-19 01:45:20.000000000 -0500
++++ fglrx-installer-8.501.new/lib/modules/fglrx/build_mod/firegl_public.c    2008-07-07 14:06:02.000000000 -0500
+@@ -24,13 +24,13 @@
// ============================================================
#include <linux/version.h>
+
+-
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#error Kernel versions older than 2.6.0 are no longer supported by this module.
+-#endif
++#endif
+
#include <linux/autoconf.h>
+
+-
#if !defined(CONFIG_X86_PC)
++#if !defined(CONFIG_X86_PC)
#if !defined(CONFIG_X86_64)
#if !defined(CONFIG_X86_VOYAGER)
#if !defined(CONFIG_X86_NUMAQ)
+@@ -62,10 +62,10 @@
+  * 
distribution would even include such a kernel patch. */
#ifdef CONFIG_MEM_MIRROR
/* Prevent asm/mm_track.h from being included in subsequent
+- * kernel headers as that would redefine CONFIG_MEM_MIRROR. */
++ * kernel headers as that would redefine CONFIG_MEM_MIRROR. */
#ifndef CONFIG_X86_64
#define __I386_MMTRACK_H__
+-#define mm_track(ptep)
++#define mm_track(ptep)
#else
#define __X86_64_MMTRACK_H__
#define mm_track_pte(ptep)
+@@ -93,+93,@@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
+-#include <linux/init.h>
++//#include <linux/init.h>
#include <linux/file.h>
#include <linux/pci.h>
#include <linux/wait.h>
+@@ -151,+151,@@
#include "linux/freezer.h"
#endif
+
+-
//  For 2.6.18 or higher, the UTS_RELEASE is defined in the linux/utsrelease.h.
+-#ifndef UTS_RELEASE
++//  For 2.6.18 or higher, the UTS_RELEASE is defined in the linux/utsrelease.h.
++#ifndef UTS_RELEASE
#include <linux/utsrelease.h>
#endif
+
+@@ -
210,+210,@@
int __ke_debuglevel 0;
int __ke_moduleflags 0;
+
+-static 
struct pci_device_id fglrx_pci_table[] =
++static 
struct pci_device_id fglrx_pci_table[] =
+ {
#define FGL_ASIC_ID(x)                      \
+    {                           \
+@@ -
278,+278,@@
+
+ static 
int firegl_pat_enabled 0;
+ static 
unsigned long KCL_pat[2];
+-static 
unsigned long KCL_orig_pat[2];
++static 
unsigned long KCL_orig_pat[2];
+
+ static 
int ATI_API_CALL KCL_enable_pat(void);
+ static 
void ATI_API_CALL KCL_disable_pat(void);
+@@ -
305,+305,@@
#endif
+
__ke_ssize_t ip_firegl_readstruct file *filp,
+-                         
char *buf,
++                         
char *buf,
+                          
__ke_size_t size,
+                          
__ke_loff_t *off_ptr)
+ {
+@@ -
313,+313,@@
+ }
+
__ke_ssize_t ip_firegl_writestruct file *filp,
+-                          const 
char *buf,
++                          const 
char *buf,
+                           
__ke_size_t size,
+                           
__ke_loff_t *off_ptr)
+ {
+@@ -
449,34 +449,34 @@
READ_PROC_WRAP(firegl_bios_version)
READ_PROC_WRAP(firegl_interrupt_info)
+
+-static 
int
++static int
firegl_interrupt_open_wrap(
+-        
struct inode *inode,
+-        
struct file *file)
++        
struct inode *inode,
++        
struct file *file)
+ {
+     return 
firegl_interrupt_open(inodefile);
+ }
+
+-static 
int
++static int
firegl_interrupt_release_wrap(
+-        
struct inode *inode,
+-        
struct file *file)
++        
struct inode *inode,
++        
struct file *file)
+ {
+     return 
firegl_interrupt_release(inodefile);
+ }
+
+-static 
ssize_t
++static ssize_t
firegl_interrupt_read_wrap(
+-        
struct file *user_file,
+-        
char __user *user_buf,
+-        
size_t user_buf_size,
++        
struct file *user_file,
++        
char __user *user_buf,
++        
size_t user_buf_size,
+         
loff_t *user_file_pos)
+ {
+     return (
ssize_tfiregl_interrupt_read(user_fileuser_bufuser_buf_sizeuser_file_pos);
+ }
+
+-static 
unsigned int
+-firegl_interrupt_poll_wrap(struct file *user_filepoll_table *pt)
++static 
unsigned int
++firegl_interrupt_poll_wrap(struct file *user_filepoll_table *pt)
+ {
+     if(
firegl_interrupt_poll(user_file, (__ke_poll_table*)pt))
+     {
+@@ -
488,11 +488,11 @@
+     }
+ }
+
+-static 
ssize_t
++static ssize_t
firegl_interrupt_write_wrap(
+-        
struct file *user_file,
+-        const 
char __user *user_buf,
+-        
size_t user_buf_size,
++        
struct file *user_file,
++        const 
char __user *user_buf,
++        
size_t user_buf_size,
+         
loff_t *user_file_pos)
+ {
+     return (
ssize_tfiregl_interrupt_write(user_fileuser_bufuser_buf_sizeuser_file_pos);
+@@ -
502,+502,@@
+  *  \
param func function to be wrapped
+  *  \return None */
+
+-static 
void
++static void
firegl_smp_func_parameter_wrap(
void *func)
+ {
+@@ -
517,+517,@@
+     .
write      firegl_interrupt_write_wrap
+ };
+
+-
__ke_proc_list_t firegl_proc_list[] =
++
__ke_proc_list_t firegl_proc_list[] =
+ {
+     { 
"name",           drm_name_info_wrap,         NULL},
+     { 
"mem",            drm_mem_info_wrap,          NULL},
+@@ -
586,11 +586,11 @@
+         {
+             
ent->proc_fops = (struct file_operations*)list->fops;
+         }
+-
++
+         {
+             
ent->data = (dev->pubdev.signature == FGL_DEVICE_SIGNATURE)? firegl_find_device(minor) : (dev);
+         }
+-
++
+         list++;
+     }
+
+@@ -
623,+623,@@
+     {
+         
remove_proc_entry("dri"NULL);
+         
__KE_DEBUG("remove proc dri. \n");
+-    }
++    }
+     return 
0;
+ }
+
+@@ -
661,12 +661,12 @@
+ {
+     
int i;
+         
int count 0;
+-
++
+         
__KE_DEBUG("firegl_stub_getminor: name=\"%s\"\n"name);
+
+-    for( 
0FIREGL_STUB_MAXCARDSi++ )
++    for( 
0FIREGL_STUB_MAXCARDSi++ )
+         {
+-        if( !
firegl_stub_list[i].fops )
++        if( !
firegl_stub_list[i].fops )
+             {
+         
firegl_stub_list[i].name name;
+         
firegl_stub_list[i].fops fops;
+@@ -
693,16 +693,16 @@
+     if (
minor || minor >= FIREGL_STUB_MAXCARDS)
+     {
+         return -
1;
+-    }
++    }
+     
firegl_proc_cleanup(minorfiregl_stub_rootfiregl_stub_list[minor].dev_rootfiregl_stub_list[minor].proclist);
+     
firegl_stub_list[minor].name NULL;
+     
firegl_stub_list[minor].fops NULL;
+     
firegl_stub_list[minor].proclist NULL;
+
+-    if( 
minor == (firegl_minors-1) )
++    if( 
minor == (firegl_minors-1) )
+     {
+         
unregister_chrdev(DRM_MAJOR"drm");
+-    }
++    }
+     return 
0;
+ }
+
+@@ -
726,+726,@@
+         return -
1;
+     } else if(
err == -EBUSY) {
+
+-        
// the registering of the module's device has failed
++        // the registering of the module's device has failed
+         // because there was already some other drm module loaded.
+         __KE_DEBUG("register_chrdev() failed with -EBUSY\n");
+     return -
1;
+@@ -
758,+758,@@
/* Starting from 2.6.14, kernel has new struct defined for pm_message_t,
+    we have to handle this case separately.
+    2.6.11/12/13 kernels have pm_message_t defined as int and older kernels
+-   don't have pm_message_t defined.
++   don't have pm_message_t defined.
+  */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
+ static int fglrx_pci_suspend(struct pci_dev *pdevpm_message_t pm_event)
+@@ -
794,+794,@@
+
+     if (!
ret)
+     {
+-
+-    
// since privdev->pcidev is acquired in X server, use pdev
+-    // directly here to allow suspend/resume without X server start.
++
++    
// since privdev->pcidev is acquired in X server, use pdev
++    // directly here to allow suspend/resume without X server start.
+         firegl_pci_save_state((__ke_pci_dev_t*)pdevprivdev);
+         
pci_disable_device(pdev);
+         
PMSG_EVENT(pdev->dev.power.power_state) = state;
+@@ -
848,13 +848,13 @@
+     
// before pci_set_master!
+     firegl_pci_restore_state((__ke_pci_dev_t*)pdevprivdev);
+
+-    if (
pci_enable_device(pdev))
++    if (
pci_enable_device(pdev))
+     {
+         
__KE_ERROR("Cannot enable PCI device.\n");
+-    }
++    }
+
+     
pci_set_master(pdev);
+-
++
+     
firegl_cail_powerup(privdev);
+
+     if (
PMSG_EVENT(pdev->dev.power.power_state) == PM_EVENT_SUSPEND)
+@@ -
865,+865,@@
+     return 
0;
+ }
+
+-static 
struct pci_driver fglrx_pci_driver =
++static 
struct pci_driver fglrx_pci_driver =
+ {
+     .
name           "fglrx_pci",
+     .
id_table       fglrx_pci_table,
+@@ -
921,10 +921,10 @@
+     {
+         
pid = (struct pci_device_id *) &fglrx_pci_table[i];
+         
pdev NULL;
+-        while (( 
pdev pci_get_subsys(pid->vendor,
+-                                       
pid->device,
+-                                       
PCI_ANY_ID,
+-                                       
PCI_ANY_ID,
++        while (( 
pdev pci_get_subsys(pid->vendor,
++                                       
pid->device,
++                                       
PCI_ANY_ID,
++                                       
PCI_ANY_ID,
+                                        
pdev)) != NULL)
+         {
+             
num_of_devices++;
+@@ -
934,+934,@@
+
+     if (
firegl_init_device_heads(num_of_devices))
+     {
+-        return -
ENOMEM;
++        return -
ENOMEM;
+     }
+
+     for (
i=0fglrx_pci_table[i].vendor != 0i++)
+@@ -
942,15 +942,15 @@
+         
pid = (struct pci_device_id *) &fglrx_pci_table[i];
+
+         
pdev NULL;
+-        while (( 
pdev pci_get_subsys(pid->vendor,
+-                                       
pid->device,
+-                                       
PCI_ANY_ID,
+-                                       
PCI_ANY_ID,
++        while (( 
pdev pci_get_subsys(pid->vendor,
++                                       
pid->device,
++                                       
PCI_ANY_ID,
++                                       
PCI_ANY_ID,
+                                        
pdev)) != NULL)
+         {
+             if ((
ret_code firegl_get_dev(pubdevpdev)))
+             {
+-                return 
ret_code;
++                return 
ret_code;
+             }
+
+             
j++;
+@@ -
983,+983,@@
+     
// init global vars that are in fact constants
+     __ke_HZ HZ;
+
+-
#ifdef _KE_SERIAL_DEBUG
++#ifdef _KE_SERIAL_DEBUG
+     __ke_SetSerialPort();
#endif
+
+@@ -
995,11 +995,11 @@
+         return 
retcode;
+     }
+
+-
#ifdef FIREGL_CF_SUPPORT
++#ifdef FIREGL_CF_SUPPORT
+     adapter_chain_init();
+     
cf_object_init();
+-
#endif
+-
++
#endif
++
+     
// init DRM proc list
+     drm_proclist kmalloc((DRM_PROC_ENTRIES 1) * sizeof(__ke_proc_list_t), GFP_KERNEL);
+     if ( 
drm_proclist == NULL )
+@@ -
1087,+1087,@@
+             
dev->pubdev.date,
+             
firegl_minors);
+
+-
++
#ifdef FIREGL_POWER_MANAGEMENT
+     if (pci_register_driver (&fglrx_pci_driver) < 0)
+     {
+@@ -
1117,12 +1117,12 @@
+     {
+        
KCL_disable_pat();
+        
__KE_INFO("Disable PAT\n");
+-    }
++    }
#endif // FIREGL_USWC_SUPPORT
+
+     for (
0counti++)
+     {
+-        if ( 
firegl_stub_unregister(i) )
++        if ( 
firegl_stub_unregister(i) )
+         {
+             
__KE_ERROR("Cannot unload module on minor: %d\n"i);
+         }
+@@ -
1144,10 +1144,10 @@
+             
dev->pubdev.patchlevel,
+             
dev->pubdev.date);
+
+-
#ifdef FIREGL_CF_SUPPORT
++#ifdef FIREGL_CF_SUPPORT
+     cf_object_cleanup();
+-    
adapter_chain_cleanup();
+-
#endif // FIREGL_CF_SUPPORT
++    adapter_chain_cleanup();
++
#endif // FIREGL_CF_SUPPORT
+
+     
firegl_private_cleanup (&dev->pubdev);
+
+@@ -
1225,18 +1225,18 @@
void ATI_API_CALL __ke_remove_wait_queue(__ke_wait_queue_head_tqueue_head__ke_wait_queue_tentry)
+ {
//    current->state = TASK_RUNNING;
+-    remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
++    
remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
+                                     (
wait_queue_t*)(void *)entry);
+ }
+
void ATI_API_CALL __ke_init_waitqueue_head(__ke_wait_queue_head_tqueue_head)
+ {
+-    
init_waitqueue_head((wait_queue_head_t*)(void *)queue_head);
++    
init_waitqueue_head((wait_queue_head_t*)(void *)queue_head);
+ }
+
void ATI_API_CALL __ke_wait_event_interruptible(__ke_wait_queue_head_tqueue_headint condition)
+ {
+-    
wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition);
++    
wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition);
+ }
+
void ATI_API_CALL __ke_poll_wait(struct filefilp__ke_wait_queue_head_tqueue_head__ke_poll_tablept)
+@@ -
1247,13 +1247,13 @@
void ATI_API_CALL *__ke_asyncio_alloc_sema()
+ {
+     
int i;
+-
++
+     for(
i=0i<FIREGL_ASYNCIO_MAX_SEMAi++)
+     {
+         if(
fireglAsyncioSemaphoreUsed[i] != 1)
+         {
+             
fireglAsyncioSemaphoreUsed[i] = 1;
+-
++
+             return &(
fireglAsyncioSemaphore[i]);
+         }
+     }
+@@ -
1263,+1263,@@
void ATI_API_CALL __ke_asyncio_free_sema(struct semaphore *pSema)
+ {
+     
int i;
+-
++
+     for(
i=0i<FIREGL_ASYNCIO_MAX_SEMAi++)
+     {
+         if( &(
fireglAsyncioSemaphore[i]) == pSema )
+@@ -
1277,15 +1277,15 @@
void ATI_API_CALL __ke_asyncio_init_sema(void)
+ {
+     
int i;
+-
++
+     for(
i=0i<FIREGL_ASYNCIO_MAX_SEMAi++)
+     {
+         
fireglAsyncioSemaphoreUsed[i] = 0;
+     }
+-}
++}
+
+-
int ATI_API_CALL __ke_fasync_helperint fd,
+-                        
struct file *filep,
++
int ATI_API_CALL __ke_fasync_helperint fd,
++                        
struct file *filep,
+                         
int mode,
+                         
struct fasync_struct **pasync_queue)
+ {
+@@ -
1359,33 +1359,33 @@
+ {
+    
struct task_struct *p;
+    
int process_terminated 1;
+-
+-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++
++
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+    rcu_read_lock();
+-
#else
++#else
+    read_lock(&tasklist_lock);
+-
#endif
+-   find_task_by_pidpid );
+-   if (
p)
++
#endif
++   find_task_by_vpidpid );
++   if (
p)
+    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
+-      if (p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD)
++      if (
p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD)
#else
+-      if (!(p->flags PF_EXITING))
++      if (!(
p->flags PF_EXITING))
#endif
+       {
+          
process_terminated 0;
+       }
+-   }
+-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++   }
++
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+    rcu_read_unlock();
+-
#else
++#else
+    read_unlock(&tasklist_lock);
+-
#endif
++#endif
+    return process_terminated;
+ }
+
+-
/** /brief Call global OS kernel task/thread scheduler
++/** /brief Call global OS kernel task/thread scheduler
+  *  /return Nonzero if a system call was awakened by a signal
+  */
int ATI_API_CALL KCL_GetSignalStatus(void)
+@@ -
1439,15 +1439,15 @@
+     
unblock_all_signals();
+ }
+
+-
#if defined(__i386__)
++#if defined(__i386__)
#ifndef __HAVE_ARCH_CMPXCHG
+-static inline
+-unsigned long __fgl_cmpxchg(volatile void *ptrunsigned long old,
+-                        
unsigned long new, int size)
+-{
+-    
unsigned long prev;
+-    switch (
size) {
+-    case 
1:
++static 
inline
++unsigned long __fgl_cmpxchg(volatile void *ptrunsigned long old,
++                        
unsigned long new, int size)
++{
++    
unsigned long prev;
++    switch (
size) {
++    case 
1:
+         
__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                              : "=a"(prev)
+                              : 
"q"(new), "m"(*__xg(ptr)), "0"(old)
+@@ -
1558,+1558,@@
+  */
KCL_TYPE_Pid ATI_API_CALL KCL_GetPid(void)
+ {
+-    return 
current->pid;
++    return 
current->pid;
+ }
+
/** /brief Return the current Thread Group ID
+@@ -1566,7 +1566,7 @@
+  */
KCL_TYPE_Pid ATI_API_CALL KCL_GetTgid(void)
+ {
+-    return 
current->tgid;
++    return 
current->tgid;
+ }
+
/** /brief Return the effective user ID
+@@ -1662,7 +1662,7 @@
+     1
+ #else
+     0
+-#endif
++#endif
+ };
+
+ /** /brief Check whether a kernel configuration parameter is defined
+@@ -1730,13 +1730,13 @@
+ #if defined(__x86_64__) || defined(__ia64__)
+ void* ATI_API_CALL __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle)
+ {
+-    return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
++    return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
+ }
+
+ void ATI_API_CALL __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
+                          unsigned int dma_handle)
+ {
+-    pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
++    pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
+         (unsigned long)dma_handle);
+ }
+ #endif // __ia64__
+@@ -1760,7 +1760,7 @@
+
+ /** \brief This function maps OS independent error conditions to OS defined error codes
+  *  \param errcode OS independent error condition code
+- *  \return OS kernel defined error code corresponding to the requested error condition
++ *  \return OS kernel defined error code corresponding to the requested error condition
+  */
int ATI_API_CALL KCL_GetErrorCode(KCL_ENUM_ErrorCode errcode)
+ {
+@@ -
1778,+1778,@@
int ATI_API_CALL firegl_get_user_ptr(u32 *srcvoid **dst)
+ {
+   
unsigned long temp;
+-  
int err get_user(tempsrc);
++  
int err get_user(tempsrc);
+   *
dst = (void*) temp;
+   return 
err;
+ }
+@@ -
1918,+1918,@@
+
int ATI_API_CALL __ke_atomic_dec_and_test(voidv)
+ {
+-    return 
atomic_dec_and_test((atomic_t*)v);
++    return 
atomic_dec_and_test((atomic_t*)v);
+ }
+
/*****************************************************************************/
+@@ -2018,19 +2018,19 @@
#endif
+
#ifdef _KE_SERIAL_DEBUG
+-// To enable serial port debug message dumping,just define _KE_SERIAL_DEBUG in firegl_public.h file.
+-// Connect two PC with a null modern serial cable. run Hyper ternimal on the remote machine.
+-// It's useful to debug resume if network not works properly and serial port is not recovered
++// To enable serial port debug message dumping,just define _KE_SERIAL_DEBUG in firegl_public.h file.
++// Connect two PC with a null modern serial cable. run Hyper ternimal on the remote machine.
++// It's useful to debug resume if network not works properly and serial port is not recovered
// properly when fglrx resume hook is called...
+-
+-
++
++
#define SER_DATA_PORT       0x3f8
#define SER_INT_CTRL_PORT   SER_DATA_PORT + 1
#define SER_INT_STAT_PORT   SER_DATA_PORT + 2
#define SER_LINE_CTRL_PORT  SER_DATA_PORT + 3
#define SER_MODEM_CTRL_PORT SER_DATA_PORT + 4
#define SER_LINE_STAT_PORT  SER_DATA_PORT + 5
+-
++
void ATI_API_CALL __ke_printc(char c)
+ {
+      while((
inb(SER_LINE_STAT_PORT) & 0x20) == ); //wait until Transmitter Holding Register Empty
+@@ -2040,+2040,@@
void ATI_API_CALL __ke_printstr(const char *str)
+ {
+     
int len strlen(str);
+-    while(
len--)__ke_printc(*str++);
++    while(
len--)__ke_printc(*str++);
+ }
+
int ATI_API_CALL __ke_SerPrint(const char *format, ...)
+@@ -
2053,26 +2053,26 @@
+     
vsprintf(bufferformatap);
+
+     
va_end(ap);
+-
++
+     
__ke_printstr(buffer);
+-
++
+     return 
0;
+ }
void ATI_API_CALL __ke_SetSerialPort()
+ {
+     
DRM_INFO("setup serial port\n");
+-    
outb(0x00,  SER_INT_CTRL_PORT);   // Turn off interrupts
++    outb(0x00,  SER_INT_CTRL_PORT);   // Turn off interrupts
+
+-    
outb(0x80,  SER_LINE_CTRL_PORT);  // SET DLAB ON
+-    outb(0x01,  SER_DATA_PORT);  // Set Baud rate - Divisor Latch Low Byte
+-                             // 0x01 = 115,200 ,0x02 =  57,600,  0x06 =  19,200 BPS, 0x0C =   9,600 BPS
+-    outb(0x00,  SER_DATA_PORT 1);  // Set Baud rate - Divisor Latch High Byte
+-    outb(0x03,  SER_LINE_CTRL_PORT); // reset DLAB ,8 Bits, No Parity, 1 Stop Bit
+-    outb(0xC7,  SER_DATA_PORT 2);  // FIFO Control Register
++    outb(0x80,  SER_LINE_CTRL_PORT);  // SET DLAB ON
++    outb(0x01,  SER_DATA_PORT);  // Set Baud rate - Divisor Latch Low Byte
++                             // 0x01 = 115,200 ,0x02 =  57,600,  0x06 =  19,200 BPS, 0x0C =   9,600 BPS
++    outb(0x00,  SER_DATA_PORT 1);  // Set Baud rate - Divisor Latch High Byte
++    outb(0x03,  SER_LINE_CTRL_PORT); // reset DLAB ,8 Bits, No Parity, 1 Stop Bit
++    outb(0xC7,  SER_DATA_PORT 2);  // FIFO Control Register
+     outb(0x0b,  SER_DATA_PORT 4);  // Turn on DTR, RTS, and OUT2
+-
++
+     
__ke_printstr("serial port 0x3f8 is set ready for message print out \n");
+-}
++}
#endif
+
/** \brief Get number of available RAM pages
+@@ -2256,7 +2256,7 @@
+     struct page *page = NULL;
+
+     page = vmalloc_to_page(vmalloc_addr);
+-    if(page == NULL)
++    if(page == NULL)
+     {
+         __KE_ERROR("__ke_vmalloc_to_addr: invalid page!");
+         return NULL;
+@@ -2298,7 +2298,7 @@
+     retcode = do_munmap(current->mm,
+                         addr,
+                         len);
+-#endif
++#endif
+     up_write(&current->mm->mmap_sem);
+     return retcode;
+ }
+@@ -2342,10 +2342,10 @@
+        minus = !minus;
+     }
+     else
+-    {
++    {
+        ubase = base;
+     }
+-
++
+     do_div(un, ubase);
+     return (minus? -un : un);
+ }
+@@ -2375,7 +2375,7 @@
+     else
+     {
+        ubase = base;
+-    }
++    }
+
+     rem = do_div(un, ubase);
+     return (minus? -rem : rem);
+@@ -2406,7 +2406,7 @@
+     vaddr = (void *) vmap(pages, count);
+ #else
+ #ifdef VM_MAP
+-    vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL);
++    vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL);
+ #else
+     vaddr = (void *) vmap(pages, count, 0, PAGE_KERNEL);
+ #endif
+@@ -2462,7 +2462,7 @@
+ }
+ #endif  // defined(VM_MAP) || defined(vunmap)
+
+-/** \brief Reserve a memory page
++/** \brief Reserve a memory page
+  *
+  * \param pt Kernel logical address of the page
+  *
+@@ -2474,7 +2474,7 @@
+     SetPageReserved(virt_to_page((unsigned long)pt));
+ }
+
+-/** \brief Unreserve a memory page
++/** \brief Unreserve a memory page
+  *
+  * \param pt Kernel logical address of the page
+  *
+@@ -2486,7 +2486,7 @@
+     ClearPageReserved(virt_to_page((unsigned long)pt));
+ }
+
+-/** \brief Lock a memory page
++/** \brief Lock a memory page
+  *
+  * \param pt Kernel logical address of the page
+  *
+@@ -2495,14 +2495,14 @@
+  */
void ATI_API_CALL KCL_LockMemPage(voidpt)
+ {
+-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+     SetPageReserved(virt_to_page((unsigned long)pt));
#else
+     lock_page(virt_to_page((unsigned long)pt));
#endif
+ }
+
+-
/** \brief Unlock a memory page
++/** \brief Unlock a memory page
+  *
+  * \param pt Kernel logical address of the page
+  *
+@@ -2511,7 +2511,7 @@
+  */
void ATI_API_CALL KCL_UnlockMemPage(voidpt)
+ {
+-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+     ClearPageReserved(virt_to_page((unsigned long)pt));
#else
+     unlock_page(virt_to_page((unsigned long)pt));
+@@ -
2536,+2536,@@
+     return 
memory->vmptr;
+ }
#endif
+-
++
voidATI_API_CALL __ke_ioremap(unsigned long offsetunsigned long size)
+ {
+     return 
ioremap(offsetsize);
+@@ -
2592,+2592,@@
+ {
/*Some kernel developer removed the export of symbol "flush_tlb_page" on 2.6.25 x86_64 SMP kernel.
+   Define a simple version here.*/
+-#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
++#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
+     on_each_cpu(KCL_flush_tlb_one, &va11);
#else
+     flush_tlb_page(vmava);
+@@ -
2698,+2698,@@
+ static 
inline int ptep_clear_flush_dirty(struct vm_area_struct *vmaunsigned long addrpte_t *ptep)
+ {
+     
int ret 0;
+-
++
+     
DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X"vmaaddrptep, *ptep);
+-
++
+     if (
pte_dirty(*ptep))
+     {
#ifdef __x86_64__
+@@ -2715,19 +2715,19 @@
+         {
+             
pte_update(vma->vm_mmaddrptep);
+         }
+-
#endif
++#endif
+     }
+
+     
DBG_TRACE("0x%08X->0x%08X"ptep, *ptep);
+-
++
+     
// Flush Translation Lookaside Buffers
+     if (ret)
+     {
+         
KCL_flush_tlb_onepage(vma,addr);
+     }
+-
++
+     
DBG_LEAVE("%d"ret);
+-
++
+     return 
ret;
+ }
#endif
+@@ -2754,+2754,@@
+  * 
the virtual address
+  *
+  * \
param mm Pointer to the memory descriptor structure
+- * \param virtual_addr Virtual address
++ * \param virtual_addr Virtual address
+  *
+  * \return 
Old value of the "dirty" flag on success or negative on error
+  *
+@@ -
3015,+3015,@@
+        return 
0;
#else
+     return 1;
+-
#endif
++#endif
#else /* !CONFIG_MTRR */
+     return 0;
#endif /* !CONFIG_MTRR */
+@@ -3041,+3041,@@
+
int ATI_API_CALL __ke_has_vmap(void)
+ {
+-
// We disable vmap for 2.4.x kernel to work around the big memory( > 4GB ) issue.
++// We disable vmap for 2.4.x kernel to work around the big memory( > 4GB ) issue.
#if defined(VM_MAP) || defined(vunmap)
+     return 1;
#else
+@@ -3063,+3063,@@
+ {
+     return 
0;
+ }
+-
#endif
++#endif
+
/*****************************************************************************/
+
+@@ -
3124,+3124,@@
+ {
+     
struct pci_devdev = (struct pci_dev*)pcidev;
+     return 
PCI_FUNC(dev->devfn);
+-}
++}
+
__ke_dma_addr_t ATI_API_CALL __ke_pci_map_single (__ke_pci_dev_t *pdevvoid *buffer__ke_size_t sizeint direction)
+ {
+@@ -
3211,+3211,@@
+     return 
IRQ_HANDLED;
+ }
#endif
+-
++
int ATI_API_CALL __ke_request_irq(unsigned int irq,
+     
void (*ATI_API_CALL handler)(intvoid *, void *),
+     const 
char *dev_namevoid *dev_id)
+@@ -
3224,+3224,@@
+         
SA_SHIRQ,
#else
+         IRQF_SHARED,
+-
#endif
++#endif
+         dev_name,
+         
dev_id);
+ }
+@@ -
3260,12 +3260,12 @@
+     return (int)(
agpmem->page_count);
+ }
+
+-
void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memoryagpmem,
++
void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memoryagpmem,
+                                              
unsigned long **memory_ptr)
+ {
+     
__KE_DEBUG("[%s] agpmem=0x%016lx agpmem->memory=0x%016lx [0]=0x%016x",
+-               
__FUNCTION__,
+-               (
unsigned long)agpmem,
++               
__FUNCTION__,
++               (
unsigned long)agpmem,
+                (
unsigned long)agpmem->memory,
+                (
agpmem->memory)[0]);
+
+@@ -
3274,18 +3274,@@
+
/*****************************************************************************/
+
+-
#ifndef NOPAGE_SIGBUS
+-#define NOPAGE_SIGBUS 0
+-#endif /* !NOPAGE_SIGBUS */
+-
typedef struct page mem_map_t;
typedef mem_map_t *vm_nopage_ret_t;
+
+-static 
__inline__ vm_nopage_ret_t do_vm_nopage(struct vm_area_structvma,
+-                                                     
unsigned long address)
+-{
+-    return 
0;   /* Disallow mremap */
+-}
+
#ifdef __AGP__BUILTIN__
#ifdef __ia64__
+@@ -3310,19 +3301,20 @@
+             return 
page;
+         }
+     }
+-    return 
NOPAGE_SIGBUS;        /* Disallow mremap */
++    return VM_FAULT_SIGBUS;        /* Disallow mremap */
+ }
+
#endif /* __ia64__ */
#endif /* __AGP__BUILTIN__ */
+
+
+-static 
__inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_structvma,
+-                                                   
unsigned long address)
++static 
__inline__ int do_vm_shm_nopage(struct vm_area_structvma,
++                       
struct vm_fault *vmf)
+ {
+     
pgd_tpgd_p;
+     
pmd_tpmd_p;
+     
pte_t  pte;
++    
unsigned long address = (unsigned long)vmf->virtual_address;
+     
unsigned long vma_offset;
+     
unsigned long pte_linear;
+     
mem_map_tpMmPage;
+@@ -
3351,+3343,@@
+             (
unsigned long)__ke_vm_offset(vma));
+
+     if (
address vma->vm_end)
+-        return 
NOPAGE_SIGBUS/* address is out of range */
++        return VM_FAULT_SIGBUS/* address is out of range */
+
+     
/*  Calculate offset into VMA */
+     vma_offset address vma->vm_start;
+@@ -
3363,+3355,@@
+     
pte_linear firegl_get_addr_from_vm(vma);
+     if (!
pte_linear)
+     {
+-        return 
NOPAGE_SIGBUS/* bad address */
++        return VM_FAULT_SIGBUS/* bad address */
+     }
+     
pte_linear += vma_offset;
+
+@@ -
3391,+3383,@@
+
+     
//  __KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
+     //    address, page_address(pMmPage));
+-    return pMmPage;
++    
vmf->page pMmPage;
++
++    return 
0;
+ }
+
/*
+@@ -3400,8 +3394,10 @@
+       (which is one ore more pages in size)
+
+ */
+-static __inline__ vm_nopage_ret_t do_vm_dma_nopage(struct vm_area_structvmaunsigned long address)
++static 
__inline__ int do_vm_dma_nopage(struct vm_area_structvma,
++                       
struct vm_fault *vmf)
+ {
++    
unsigned long address = (unsigned longvmf->virtual_address;
+     
unsigned long kaddr;
+     
mem_map_tpMmPage;
+
+@@ -
3417,+3413,@@
+     
kaddr firegl_get_addr_from_vm(vma);
+     if (!
kaddr)
+     {
+-        return 
NOPAGE_SIGBUS/* bad address */
++        return VM_FAULT_SIGBUS/* bad address */
+     }
+     
kaddr += (address vma->vm_start);
+
+@@ -
3429,19 +3425,23 @@
+     
// with drm_alloc_pages, which marks all pages as reserved. Reserved
+     // pages' usage count is not decremented by the kernel during unmap!!!
+     //
+-    // For kernel >= 2.6.15, We should reenable this, because the VM sub-system
+-    // will decrement the pages' usage count even for the pages marked as reserved
++    // For kernel >= 2.6.15, We should reenable this, because the VM sub-system
++    // will decrement the pages' usage count even for the pages marked as reserved
+     //                                 - MC.
+     get_page(pMmPage); /* inc usage count of page */
#endif
+
+     
__KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
+         
addresspage_address(pMmPage));
+-    return 
pMmPage;
++    
vmf->page pMmPage;
++
++    return 
0;
+ }
+
+-static __inline__ vm_nopage_ret_t do_vm_kmap_nopage(struct vm_area_struct* vma, unsigned long address)
++static __inline__ int do_vm_kmap_nopage(struct vm_area_struct* vma,
++                    struct vm_fault *vmf)
+ {
++    unsigned long address = (unsigned long)vmf->virtual_address;
+     
unsigned long kaddr;
+     
mem_map_tpMmPage;
+
+@@ -
3451,13 +3451,14 @@
+     if ((
pMmPage = (mem_map_t*) firegl_get_pagetable_page_from_vm(vma)))
+     {
+         
get_page(pMmPage);
+-        return 
pMmPage;
++    
vmf->page pMmPage;
++        return 
0;
+     }
+
+     
kaddr firegl_get_addr_from_vm(vma);
+     if (!
kaddr)
+     {
+-        return 
NOPAGE_SIGBUS/* bad address */
++        return VM_FAULT_SIGBUS/* bad address */
+     }
+     
kaddr += (address vma->vm_start);
+
+@@ -
3470,50 +3471,52 @@
+
+     
__KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n"addresspage_address(pMmPage));
+
+-    return 
pMmPage;
++    
vmf->page pMmPage;
++    return 
0;
+ }
+
+-
/**
++/**
+  **
+- **  This routine is intented to locate the page table through the
++ **  This routine is intented to locate the page table through the
+  **  pagelist table created earlier in dev-> pcie
+  **/
+-static __inline__ vm_nopage_ret_t do_vm_pcie_nopage(struct vm_area_struct* vma,
+-                                                         unsigned long address)
++static __inline__ int do_vm_pcie_nopage(struct vm_area_struct* vma,
++                    struct vm_fault *vmf)
+ {
+
++    unsigned long address = (unsigned long)vmf->virtual_address;
+     
unsigned long vma_offset;
+-    
unsigned long i;
++    
unsigned long i;
+     
mem_map_tpMmPage;
+     
struct firegl_pcie_mempciemem;
+     
unsigned longpagelist;
+-
++
+     
drm_device_t *dev = (drm_device_t *)firegl_get_dev_from_vm(vma);
+     if (
dev == NULL)
+     {
+         
__KE_ERROR("dev is NULL\n");
+-        return 
NOPAGE_SIGBUS;
++        return 
VM_FAULT_SIGBUS;
+     }
+
+     if (
address vma->vm_end)
+     {
+         
__KE_ERROR("address out of range\n");
+-        return 
NOPAGE_SIGBUS/* address is out of range */
++        return VM_FAULT_SIGBUS/* address is out of range */
+     }
+     
pciemem firegl_get_pciemem_from_addr vmaaddress);
+     if (
pciemem == NULL)
+     {
+         
__KE_ERROR("No pciemem found! \n");
+-        return 
NOPAGE_SIGBUS;
+-    }
++        return 
VM_FAULT_SIGBUS;
++    }
+     
pagelist firegl_get_pagelist_from_vm(vma);
+
+-    if (
pagelist == NULL)
++    if (
pagelist == NULL)
+     {
+         
__KE_ERROR("No pagelist! \n");
+-        return 
NOPAGE_SIGBUS;
++        return 
VM_FAULT_SIGBUS;
+     }
+-
++
+     
/** Find offset in  vma */
+     vma_offset address vma->vm_start;
+     
/** Which entry in the pagelist */
+@@ -3525,15 +3528,17 @@
+     if (
page_address(pMmPage) == 0x0)
+     {
+         
__KE_ERROR("Invalid page address\n");
+-        return 
NOPAGE_SIGBUS;
++        return 
VM_FAULT_SIGBUS;
+     }
+-    return 
pMmPage;
++
++    
vmf->page pMmPage;
++    return 
0;
+ }
+
+-static __inline__ vm_nopage_ret_t do_vm_gart_nopage(struct vm_area_struct* vma,
+-                                                    unsigned long address)
++static __inline__ int do_vm_gart_nopage(struct vm_area_struct* vma,
++                    struct vm_fault *vmf)
+ {
+-
++    unsigned long address = (unsigned long)vmf->virtual_address;
+     
unsigned long page_addr;
+     
unsigned long offset;
+     
struct page *page;
+@@ -
3541,36 +3546,31 @@
+     if (
address vma->vm_end)
+     {
+         
__KE_ERROR("Invalid virtual address\n");
+-        return 
NOPAGE_SIGBUS;   /* Disallow mremap */
+-    }
++        return 
VM_FAULT_SIGBUS;   /* Disallow mremap */
++    }
+
+     
offset      address vma->vm_start;
+-
#ifdef FIREGL_CF_SUPPORT
++#ifdef FIREGL_CF_SUPPORT
+     page_addr   mc_heap_get_page_addr(vmaoffset);
#else
+     page_addr   firegl_cmmqs_get_pageaddr_from_vm(vmaoffset);
+-
#endif
++#endif
+     if( !page_addr)
+     {
+         
__KE_ERROR("Invalid page address\n");
+-        return 
NOPAGE_SIGBUS;   /* Disallow mremap */
++        return VM_FAULT_SIGBUS;   /* Disallow mremap */
+     }
+     
page        virt_to_page(page_addr);
+     
get_page(page);
+
+-    return 
page;
++    
vmf->page page;
++    return 
0;
+ }
+
+-
+-
+-
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+-
+-static vm_nopage_ret_t vm_nopage(struct vm_area_struct* vma,
+-                                 unsigned long address,
+-                                 int *type)
++static int vm_nopage(struct vm_area_struct* vma,
++             struct vm_fault *vmf)
+ {
+-    if (type) *type = VM_FAULT_MINOR;
+-        return do_vm_nopage(vma, address);
++    return VM_FAULT_SIGBUS;
+ }
+
#ifdef __AGP__BUILTIN__
+@@ -3602,12 +3602,10 @@
+     (
which is one or more pages in size)
+
+  */
+-static 
vm_nopage_ret_t vm_shm_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int *type)
++static 
int vm_shm_nopage(struct vm_area_structvma,
++             
struct vm_fault *vmf)
+ {
+-    if (
type) *type VM_FAULT_MINOR;
+-        return 
do_vm_shm_nopage(vmaaddress);
++        return 
do_vm_shm_nopage(vmavmf);
+ }
+
/*
+@@ -3616,116 +3614,30 @@
+       (which is one ore more pages in size)
+
+ */
+-static vm_nopage_ret_t vm_dma_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int *type)
++static 
int vm_dma_nopage(struct vm_area_structvma,
++             
struct vm_fault *vmf)
+ {
+-    if (
type) *type VM_FAULT_MINOR;
+-        return 
do_vm_dma_nopage(vmaaddress);
++        return 
do_vm_dma_nopage(vmavmf);
+ }
+
+-static 
vm_nopage_ret_t vm_kmap_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int *type)
++static 
int vm_kmap_nopage(struct vm_area_structvma,
++              
struct vm_fault *vmf)
+ {
+-    if (
type) *type VM_FAULT_MINOR;
+-        return 
do_vm_kmap_nopage(vmaaddress);
++        return 
do_vm_kmap_nopage(vmavmf);
+ }
+
+-static 
vm_nopage_ret_t vm_pcie_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int *type)
+-{
+-       return 
do_vm_pcie_nopage(vmaaddress);
+-}
+-
+-static 
vm_nopage_ret_t vm_gart_nopage(struct vm_area_structvma,
+-                                      
unsigned long address,
+-                                      
int *type)
+-{
+-       return 
do_vm_gart_nopage(vmaaddress);
+-}
+-
+-
#else   /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
+-
+-static 
vm_nopage_ret_t vm_nopage(struct vm_area_structvma,
+-                                 
unsigned long address,
+-                                 
int write_access)
+-{
+-    return 
do_vm_nopage(vmaaddress);
+-}
+-
+-
#ifdef __AGP__BUILTIN__
+-#ifdef __ia64__
+-
+-
+-static 
vm_nopage_ret_t vm_cant_nopage(struct vm_area_structvma,
+-                                 
unsigned long address,
+-                                 
int write_access)
+-{
+-    return 
do_vm_cant_nopage(vmaaddress);
+-}
+-
#endif /* __ia64__ */
+-#endif /* __AGP__BUILTIN__ */
+-
+-
/*
+-
+-    This function is called when a page of a mmap()'ed area is not currently
+-    visible in the specified VMA.
+-    Return value is the associated physical address for the requested page.
+-    (If not implemented, then the kernel default routine would allocate a new,
+-     zeroed page for servicing us)
+-
+-    Possible errors: SIGBUS, OutOfMem
+-
+-    This routine is intended to remap addresses of SHM SAREA
+-    (which is one or more pages in size)
+-
+- */
+-static vm_nopage_ret_t vm_shm_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int write_access)
+-{
+-    return 
do_vm_shm_nopage(vmaaddress);
+-}
+-
+-
/*
+-
+-    This routine is intended to remap addresses of a OpenGL context
+-      (which is one ore more pages in size)
+-
+-*/
+-static vm_nopage_ret_t vm_dma_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int write_access)
+-{
+-     return 
do_vm_dma_nopage(vmaaddress);
+-}
+-
+-static 
vm_nopage_ret_t vm_kmap_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int write_access)
++static 
int vm_pcie_nopage(struct vm_area_structvma,
++              
struct vm_fault *vmf)
+ {
+-     return 
do_vm_kmap_nopage(vmaaddress);
++    return 
do_vm_pcie_nopage(vmavmf);
+ }
+
+-static 
vm_nopage_ret_t vm_pcie_nopage(struct vm_area_structvma,
+-                                     
unsigned long address,
+-                                     
int write_access)
+-{
+-        return 
do_vm_pcie_nopage(vmaaddress);
+-}
+-
+-static 
vm_nopage_ret_t vm_gart_nopage(struct vm_area_structvma,
+-                                      
unsigned long address,
+-                                      
int *type)
++static 
int vm_gart_nopage(struct vm_area_structvma,
++              
struct vm_fault *vmf)
+ {
+-       return 
do_vm_gart_nopage(vmaaddress);
++       return 
do_vm_gart_nopage(vmavmf);
+ }
+
+-
+-
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) */
+-
voidATI_API_CALL __ke_vma_file_priv(struct vm_area_structvma)
+ {
+     return 
vma->vm_file->private_data;
+@@ -
3775,+3687,@@
+     *(
buf i++) = pgprot _PAGE_DIRTY    'd' '-';
+     *(
buf i++) = pgprot _PAGE_PSE      'm' 'k';
+     *(
buf i++) = pgprot _PAGE_GLOBAL   'g' 'l';
+-
#endif /* __i386__ */
++#endif /* __i386__ */
+     *(buf i++) = 0;
+
+     return 
buf;
+@@ -
3804,+3716,@@
+     return 
buf;
+ }
+
+-
charATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_structvma,
+-                            
charbuf,
+-                            
unsigned long virtual_addr,
++
charATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_structvma,
++                            
charbuf,
++                            
unsigned long virtual_addr,
+                             
__ke_dma_addr_tphys_address)
+ {
+     
pgd_tpgd_p;
+@@ -
3830,+3742,@@
+
+ static 
struct vm_operations_struct vm_ops =
+ {
+-    
nopage:  vm_nopage,
++    
fault:   vm_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+@@ -
3848,42 +3760,42 @@
+
+ static 
struct vm_operations_struct vm_shm_ops =
+ {
+-    
nopage:  vm_shm_nopage,
++    
fault:   do_vm_shm_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+
+ static 
struct vm_operations_struct vm_pci_bq_ops =
+ {
+-    
nopage:  vm_dma_nopage,
++    
fault:   vm_dma_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+
+ static 
struct vm_operations_struct vm_ctx_ops =
+ {
+-    
nopage:  vm_dma_nopage,
++    
fault:   vm_dma_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+
+-static 
struct vm_operations_struct vm_pcie_ops =
++static 
struct vm_operations_struct vm_pcie_ops =
+ {
+-    
nopage:  vm_pcie_nopage,
++    
fault:   vm_pcie_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+
+ static 
struct vm_operations_struct vm_kmap_ops =
+ {
+-    
nopage:  vm_kmap_nopage,
++    
fault:   vm_kmap_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+
+ static 
struct vm_operations_struct vm_gart_ops =
+ {
+-    
nopage:  vm_gart_nopage,
++    
fault:   vm_gart_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+@@ -
3892,14 +3804,14 @@
#ifndef __ia64__
+ static struct vm_operations_struct vm_agp_bq_ops =
+ {
+-    
nopage:  vm_nopage,
++    
fault:   vm_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+-
#else
++#else
+ static struct vm_operations_struct vm_cant_agp_bq_ops =
+ {
+-    
nopage:  vm_cant_nopage,
++    
fault:   vm_cant_nopage,
+     
open:    ip_drm_vm_open,
+     
close:   ip_drm_vm_close,
+ };
+@@ -
3934,19 +3846,19 @@
#ifdef __i386__
+                 if (boot_cpu_data.x86 3)
+                 {
+-
#ifdef FIREGL_USWC_SUPPORT
++#ifdef FIREGL_USWC_SUPPORT
+                     if (!firegl_pat_enabled)
+-
#endif
++#endif
+                     {
+                         
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+                         
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
+                     }
+-
#ifdef FIREGL_USWC_SUPPORT
++#ifdef FIREGL_USWC_SUPPORT
+                     else
+                     {
+                         
vma->vm_page_prot pgprot_writecombine(vma->vm_page_prot);
+-                    }
+-
#endif
++                    }
++
#endif
+                 }
#endif /* __i386__ */
#ifdef __ia64__
+@@ -3965,+3877,@@
+             }
+             break;
+
+-
#ifdef FIREGL_USWC_SUPPORT
++#ifdef FIREGL_USWC_SUPPORT
+         case __KE_ADPT_REG:
+             {
#ifdef __ia64__
+@@ -3985,+3897,@@
+                     }
+                     else
+                     {
+-                        
vma->vm_page_prot pgprot_noncached(vma->vm_page_prot);
++                        
vma->vm_page_prot pgprot_noncached(vma->vm_page_prot);
+                     }
+                 }
#endif /* __i386__ */
+@@ -4004,+3916,@@
+             
vma->vm_ops = &vm_ops;
+             }
+             break;
+-
#endif
++#endif
+
+         case 
__KE_SHM:
+             
vma->vm_flags |= VM_SHM VM_RESERVED/* Don't swap */
+@@ -4038,+3950,@@
+
#ifdef __AGP__BUILTIN__
+         case __KE_AGP:
+-            
// if(dev->agp->cant_use_aperture == 1)
++            // if(dev->agp->cant_use_aperture == 1)
#ifdef __ia64__
+             {
+                 
/*
+@@ -4062,9 +3974,9 @@
+                     if( firegl_pat_enabled )
+                     {
+                         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+-                    }
++                    }
+                 }
+-#endif
++#endif
+
+                 if (REMAP_PAGE_RANGE(vma,offset))
+                 {
+@@ -4081,8 +3993,8 @@
+ #endif
+             break;
+         case __KE_AGP_BQS:
+-            // if(dev->agp->cant_use_aperture == 1)
+-#ifdef __ia64__
++            // if(dev->agp->cant_use_aperture == 1)
++#ifdef __ia64__
+             {
+                 /*
+                  * On some systems we can't talk to bus dma address from
+@@ -4105,9 +4017,9 @@
+                     if( firegl_pat_enabled )
+                     {
+                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+-                    }
++                    }
+                 }
+-#endif
++#endif
+
+                 if (REMAP_PAGE_RANGE(vma,offset))
+                 {
+@@ -4136,15 +4048,15 @@
+             break;
+
+          case __KE_GART_USWC:
+-#ifdef FIREGL_USWC_SUPPORT
++#ifdef FIREGL_USWC_SUPPORT
+             if (boot_cpu_data.x86 > 3)
+             {
+                 if( firegl_pat_enabled )
+                 {
+                     vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+-                }
++                }
+             }
+-#endif
++#endif
+             // fall through
+          case __KE_GART_CACHEABLE:
+              vma->vm_flags |= VM_RESERVED;
+@@ -4194,7 +4106,7 @@
+ #define FIREGL_agp_backend_release  _X(agp_backend_release)
+ #define FIREGL_agp_memory           _X(agp_memory)
+
+-unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE;
++unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE;
+
+ #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ /*****************************************************************************/
+@@ -4309,+4221,@@
int ATI_API_CALL __ke_agpgart_available(__ke_pci_dev_t *pcidevint use_internal)
+ {
+     
drm_agp_module_stub = &drm_agp;
+-    
__ke_firegl_agpgart_inuse KERNEL26_AGPGART_INUSE;
++    
__ke_firegl_agpgart_inuse KERNEL26_AGPGART_INUSE;
+     {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+         firegl_pci_device = (struct pci_dev*)(void*)pcidev;
+@@ -
4362,+4274,@@
+     } else {
+          
available __ke_firegl_agpgart_available();
+     }
+-
++
+     return 
available;
+ }
+
+@@ -
4467,+4379,@@
+
+     if (
AGP_AVAILABLE(copy_info))
+     {
+-        
struct agp_kern_info kern;
++        
struct agp_kern_info kern;
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+         if (firegl_agp_bridge == NULL)
+@@ -
4565,+4477,@@
+
int ATI_API_CALL __ke_read_agp_caps_registers(__ke_pci_dev_tdevunsigned int *caps)
+ {
+-    return -
EINVAL;
++    return -
EINVAL;
+ }
+
int ATI_API_CALL __ke_agp_acquire(__ke_pci_dev_tdev)
+@@ -
4607,+4519,@@
+
/** \brief Runs a function on all other CPUs
+  *  \param func_to_call function to be called on all other cpus
+- *  \return None
++ *  \return None
+  */
+-void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call)
++
void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call)
+ {
#ifdef CONFIG_SMP
+     smp_call_functionfiregl_smp_func_parameter_wrap, (void*)func_to_call0);
+@@ -
4716,+4628,@@
+
int ATI_API_CALL KCL_is_pat_enabled(void)
+ {
+-   return 
firegl_pat_enabled;
++   return 
firegl_pat_enabled;
+ }
+
+ static 
int ATI_API_CALL KCL_has_pat(void)
+@@ -
4732,+4644,@@
+ {
+     
unsigned long cr0=0cr4=0;
+     
unsigned long flags;
+-
++
+     
local_irq_save(flags);
+     
cr0 read_cr0() | 0x40000000;
+     
write_cr0(cr0);
+@@ -
4763,+4675,@@
+ {
+     
unsigned long cr0 0cr4 0;
+     
unsigned long flags;
+-
++
+     
local_irq_save(flags);
+     
cr0 read_cr0() | 0x40000000;
+     
write_cr0(cr0);
+@@ -
4775,+4687,@@
+        
write_cr4(cr4 & ~X86_CR4_PGE);
+     }
+      
__flush_tlb();
+-
++
+     
wrmsr(MSR_IA32_CR_PAT,  KCL_orig_pat[0], KCL_orig_pat[1]);
+
+     
cr0 read_cr0();
+@@ -
4798,+4710,@@
+          
__KE_INFO("USWC is disabled in module parameters\n");
+          return 
0;
+      }
+-
++
+      if (!
KCL_has_pat())
+      {
+         return 
0;
+@@ -
4808,13 +4720,13 @@
+
+      for ( 
02i++ )
+      {
+-        for (
04++)
++        for (
04++)
+         {
+             if (((
KCL_orig_pat[i] >> (8)) & 0xFF) == 1)
+-            {
++            {
+                
__KE_ERROR("Pat entry %d is already configured\n", (i+1)*(j+1));
+                return 
0;
+-            }
++            }
+         }
+     }
+
+@@ -
4886,+4798,@@
+ } 
kasContext_t;
+
/** \brief KAS context */
+-static kasContext_t kasContext;
++static 
kasContext_t kasContext;
+
/** \brief Kernel support required to enable KAS */
#if defined(cmpxchg)                        && \
+@@ -5188,+5100,@@
+     
DBG_TRACE("Interrupt handler returned 0x%08X"ret);
+
+     
kasSetExecutionLevel(orig_level);
+-    
spin_unlock(&kasContext.lock_ih);
++    
spin_unlock(&kasContext.lock_ih);
+
+     
DBG_LEAVE("%d"ret);
+     return 
ret;
+@@ -
5463,+5375,@@
#endif
+     spinlock_t lock;            /* OS spinlock object protecting the cache */
+     unsigned int routine_type;  /* Type of routine the cache might be accessed from */
+-    char name[14];              /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
++    char name[24];              /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
+ } kasSlabCache_t;
+
/** \brief Return Slab Cache object size
+@@ -5501,7 +5413,8 @@
+
+     slabcache_obj->routine_type = access_type;
+     spin_lock_init(&(slabcache_obj->lock));
+-    sprintf(slabcache_obj->name, "kas(%08lX)",(unsigned long)slabcache_obj);
++    snprintf(slabcache_obj->name, sizeof(slabcache_obj->name),
++               "kas(%p)", slabcache_obj);
+
+     DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
+
+@@ -6417,7 +6330,7 @@
+
+     kas_xadd(puiDestination, iAdd, ret, "l");
+
+-    return ret + iAdd;
++    return ret + iAdd;
+ #else
+     return 0xDEADC0DE; /* To make compiler happy */
#endif
+@@ -6500,+6413,@@
#ifdef FIREGL_CF_SUPPORT
+
void *ATI_API_CALL KCL_lock_init()
+-{
++{
+     
spinlock_t *lock;
+
+     
lock kmalloc(sizeof(*lock), GFP_KERNEL);
+@@ -
6512,+6425,@@
+ }
+
void ATI_API_CALL KCL_lock_deinit(void *plock)
+-{
++{
+     if (
plock == NULL)
+     {
+         
__KE_ERROR("plock is NULL\n");
+
diff -Nur -'*.orig' -'*~' fglrx-installer-8.501/lib/modules/fglrx/build_mod/firegl_public.h fglrx-installer-8.501.new/lib/modules/fglrx/build_mod/firegl_public.h
+--- fglrx-installer-8.501/lib/modules/fglrx/build_mod/firegl_public.h    2008-06-19 01:45:20.000000000 -0500
++++ fglrx-installer-8.501.new/lib/modules/fglrx/build_mod/firegl_public.h    2008-07-07 14:06:44.000000000 -0500
+@@ -78,+78,@@
+     if (!
pgd_present(*(pgd_p)))    \
+     { \
+         
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pgd)\n"); \
+-        return (
unsigned long)NOPAGE_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
++        return 
VM_FAULT_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
+         
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
+     } \
+ } while(
0)
+@@ -
91,+91,@@
+     if (!
pud_present(*(pud_p)))    \
+     { \
+         
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pud)\n"); \
+-        return (
unsigned long)NOPAGE_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
++        return 
VM_FAULT_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
+         
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
+     } \
+     
pmd_p pmd_offset(pud_ppte_linear); \
+@@ -
111,+111,@@
+     if (!
pmd_present(*(pmd_p)))    \
+     { \
+         
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pmd)\n"); \
+-        return (
unsigned long)NOPAGE_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
++        return 
VM_FAULT_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
+         
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
+     } \
+ } while(
0)
+@@ -
157,+157,@@
+     if (!
pte_present(pte)) \
+     { \
+         
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pte)\n"); \
+-        return (
unsigned long)NOPAGE_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
++        return 
VM_FAULT_SIGBUS;   /* Something bad happened; generate SIGBUS */ \
+         
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
+     } \
+ } while(
0)
<
 
Phoronix.com
Linux Driver Forums
Copyright © 2014 by Phoronix Media