/*
	Copyright 1999, Be Incorporated. All Rights Reserved.
	This file may be used under the terms of the Be Sample Code License.

	Other authors:
	Mark Watson;
	Rudolf Cornelissen 3/2002-11/2022.
*/
#include "AGP.h"
#include "DriverInterface.h"
#include "nv_macros.h"
#include <graphic_driver.h>
#include <KernelExport.h>
#include <SupportDefs.h>
#include <ISA.h>
#include <PCI.h>
#include <OS.h>
#include <directories.h>
#include <driver_settings.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/* debug output helpers: everything goes to the kernel log, prefixed */
#define TRACE(x...) dprintf("nvidia: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)

/* PCI config space access shorthands; they expect a local 'pcii' pointer
 * and the file-scope 'pci_bus' module to be in scope at the call site */
#define get_pci(o, s) (*pci_bus->read_pci_config)(pcii->bus, pcii->device, pcii->function, (o), (s))
#define set_pci(o, s, v) (*pci_bus->write_pci_config)(pcii->bus, pcii->device, pcii->function, (o), (s), (v))

/* maximum number of cards this driver will manage simultaneously */
#define MAX_DEVICES 8

/* tell the kernel which driver API revision we implement */
int32 api_version = B_CUR_DRIVER_API_VERSION;

typedef struct device_info device_info;

/* NOTE(review): timer_info is not referenced anywhere in the visible code;
 * presumably a leftover from (or placeholder for) timer-based retrace
 * emulation — confirm before removing. */
typedef struct {
	timer te;				/* kernel timer entry */
	device_info *di;		/* device the timer belongs to */
	bigtime_t when_target;	/* absolute system time the timer should fire at */
} timer_info;

/* per-card driver state */
struct device_info {
	uint32 is_open;					/* open() reference count */
	area_id shared_area;			/* area holding the shared_info block */
	shared_info *si;				/* kernel-space view of the shared info */
	vuint32 *regs;					/* mapped MMIO register aperture */
	pci_info pcii;					/* PCI configuration snapshot */
	char name[B_OS_NAME_LENGTH];	/* published device name */
};

/* file-global driver bookkeeping: one slot per detected card plus a lock */
typedef struct {
	uint32 count;						/* number of devices actually found */
	benaphore kernel;					/* serializes open/free and ISA I/O */
	char *device_names[MAX_DEVICES+1];	/* NULL-terminated publish list */
	device_info di[MAX_DEVICES];		/* per-device state */
} DeviceData;
/* prototypes for our device hooks */
static status_t open_hook(const char* name, uint32 flags, void** cookie);
static status_t close_hook(void* dev);
static status_t free_hook(void* dev);
static status_t read_hook(void* dev, off_t pos, void* buf, size_t* len);
static status_t write_hook(void* dev, off_t pos, const void* buf, size_t* len);
static status_t control_hook(void* dev, uint32 msg, void *buf, size_t len);

/* internal helpers */
static status_t map_device(device_info *di);
static void unmap_device(device_info *di);
static void probe_devices(void);
static int32 nv_interrupt(void *data);

/* global driver state, allocated in init_driver() */
static DeviceData *pd;
/* bus manager modules, acquired in init_driver() (AGP is optional) */
static isa_module_info *isa_bus = NULL;
static pci_module_info *pci_bus = NULL;
static agp_gart_module_info *agp_bus = NULL;

/* hook table handed to devfs via find_device() */
static device_hooks graphics_device_hooks = {
	open_hook,
	close_hook,
	free_hook,
	control_hook,
	read_hook,
	write_hook,
	NULL,
	NULL,
	NULL,
	NULL	/* remaining hooks (select/deselect and scattered I/O) unsupported */
};
/* PCI vendor ids of the brands this driver supports */
#define VENDOR_ID_NVIDIA 0x10de /* Nvidia */
#define VENDOR_ID_ELSA 0x1048 /* Elsa GmbH */
#define VENDOR_ID_NVSTBSGS 0x12d2 /* Nvidia STB/SGS-Thompson */
#define VENDOR_ID_VARISYS 0x1888 /* Varisys Limited */

/* PCI device ids of supported Nvidia-branded cards (zero-terminated) */
static uint16 nvidia_device_list[] = {
	0x0020,
	0x0028,
	0x0029,
	0x002a,
	0x002b,
	0x002c,
	0x002d,
	0x002e,
	0x002f,
	0x0040,
	0x0041,
	0x0042,
	0x0043,
	0x0045,
	0x0046,
	0x0047,
	0x0048,
	0x0049,
	0x004d,
	0x004e,
	0x0091,
	0x0092,
	0x0098,
	0x0099,
	0x009d,
	0x00a0,
	0x00c0,
	0x00c1,
	0x00c2,
	0x00c3,
	0x00c8,
	0x00c9,
	0x00cc,
	0x00cd,
	0x00ce,
	0x00f0,
	0x00f1,
	0x00f2,
	0x00f3,
	0x00f4,
	0x00f5,
	0x00f6,
	0x00f8,
	0x00f9,
	0x00fa,
	0x00fb,
	0x00fc,
	0x00fd,
	0x00fe,
	0x00ff,
	0x0100,
	0x0101,
	0x0102,
	0x0103,
	0x0110,
	0x0111,
	0x0112,
	0x0113,
	0x0140,
	0x0141,
	0x0142,
	0x0143,
	0x0144,
	0x0145,
	0x0146,
	0x0147,
	0x0148,
	0x0149,
	0x014b,
	0x014c,
	0x014d,
	0x014e,
	0x014f,
	0x0150,
	0x0151,
	0x0152,
	0x0153,
	0x0160,
	0x0161,
	0x0162,
	0x0163,
	0x0164,
	0x0165,
	0x0166,
	0x0167,
	0x0168,
	0x0169,
	0x016a,
	0x016b,
	0x016c,
	0x016d,
	0x016e,
	0x0170,
	0x0171,
	0x0172,
	0x0173,
	0x0174,
	0x0175,
	0x0176,
	0x0177,
	0x0178,
	0x0179,
	0x017a,
	0x017c,
	0x017d,
	0x0181,
	0x0182,
	0x0183,
	0x0185,
	0x0186,
	0x0187,
	0x0188,
	0x0189,
	0x018a,
	0x018b,
	0x018c,
	0x018d,
	0x01a0,
	0x01d1,
	0x01d3,
	0x01d7,
	0x01d8,
	0x01da,
	0x01dd,
	0x01df,
	0x01f0,
	0x0200,
	0x0201,
	0x0202,
	0x0203,
	0x0211,
	0x0212,
	0x0215,
	0x0218,
	0x0220,
	0x0221,
	0x0222,
	0x0228,
	0x0240,
	0x0241,
	0x0242,
	0x0244,
	0x0245,
	0x0247,
	0x0250,
	0x0251,
	0x0252,
	0x0253,
	0x0258,
	0x0259,
	0x025b,
	0x0280,
	0x0281,
	0x0282,
	0x0286,
	0x0288,
	0x0289,
	0x028c,
	0x0290,
	0x0291,
	0x0292,
	0x0293,
	0x0294,
	0x0295,
	0x0298,
	0x0299,
	0x029c,
	0x029f,
	0x02a0,
	0x02e0,
	0x02e1,
	0x02e2,
	0x0301,
	0x0302,
	0x0308,
	0x0309,
	0x0311,
	0x0312,
	0x0313,
	0x0314,
	0x0316,
	0x0317,
	0x031a,
	0x031b,
	0x031c,
	0x031d,
	0x031e,
	0x031f,
	0x0320,
	0x0321,
	0x0322,
	0x0323,
	0x0324,
	0x0325,
	0x0326,
	0x0327,
	0x0328,
	0x0329,
	0x032a,
	0x032b,
	0x032c,
	0x032d,
	0x032e,
	0x032f,
	0x0330,
	0x0331,
	0x0332,
	0x0333,
	0x0334,
	0x0338,
	0x033f,
	0x0341,
	0x0342,
	0x0343,
	0x0344,
	0x0345,
	0x0347,
	0x0348,
	0x0349,
	0x034b,
	0x034c,
	0x034e,
	0x034f,
	0x0391,
	0x0392,
	0x0393,
	0x0394,
	0x0398,
	0x03d0,
	0x03d1,
	0x03d2,
	0x03d5,
	0x03d6,
	0x06e4,
	0x06e8,
	0x07e1,
	0
};

/* PCI device ids of supported Elsa-branded cards (zero-terminated) */
static uint16 elsa_device_list[] = {
	0x0c60,
	0
};

/* PCI device ids of supported Nvidia STB/SGS-Thompson cards (zero-terminated) */
static uint16 nvstbsgs_device_list[] = {
	0x0020,
	0x0028,
	0x0029,
	0x002a,
	0x002b,
	0x002c,
	0x002d,
	0x002e,
	0x002f,
	0x00a0,
	0
};

/* PCI device ids of supported Varisys-branded cards (zero-terminated) */
static uint16 varisys_device_list[] = {
	0x3503,
	0x3505,
	0
};

/* master table: per vendor, the list of supported device ids;
 * terminated by a zero vendor id */
static struct {
	uint16 vendor;
	uint16 *devices;
} SupportedDevices[] = {
	{VENDOR_ID_NVIDIA, nvidia_device_list},
	{VENDOR_ID_ELSA, elsa_device_list},
	{VENDOR_ID_NVSTBSGS, nvstbsgs_device_list},
	{VENDOR_ID_VARISYS, varisys_device_list},
	{0x0000, NULL}
};
/* driver settings with conservative defaults; overridden from
 * nvidia.settings in init_driver().
 * NOTE(review): the field meanings below are inferred from the matching
 * get_driver_parameter() calls in init_driver() — confirm against the
 * nv_settings declaration in DriverInterface.h. */
static nv_settings sSettings = {
	DRIVER_PREFIX ".accelerant",	/* accelerant: name of accelerant to load */
	"none",							/* primary: preferred primary adaptor */
	false,							/* dumprom: dump card ROM to a file */
	0x00000000,						/* logmask */
	0,								/* memory: size override */
	0,								/* tv_output */
	true,							/* hardcursor */
	true,							/* usebios */
	false,							/* switchhead */
	false,							/* force_pci */
	false,							/* unhide_fw */
	false,							/* pgm_panel */
	true,							/* dma_acc */
	false,							/* vga_on_tv */
	false,							/* force_sync */
	false,							/* force_ws */
	false,							/* block_acc */
	0,								/* gpu_clk */
	0,								/* ram_clk */
	true,							/* check_edid */
};
/* Dump a mapped copy of the card's ROM to a file in the user directory so it
 * can be inspected offline. Best effort: a failed open is silently ignored.
 * Assumes 'rom' points at a mapping of at least 'size' readable bytes. */
static void
dumprom(void *rom, uint32 size, pci_info pcii)
{
	int fd;
	uint32 cnt;
	char fname[64];

	CALLED();

	/* determine the ROM-file name: unique per card in the system
	 * (NOTE(review): format string reconstructed — was truncated; confirm
	 * against the accelerant's expected dump-file naming) */
	sprintf (fname, kUserDirectory "/" DRIVER_PREFIX "." DEVICE_FORMAT ".rom",
		pcii.vendor_id, pcii.device_id, pcii.bus, pcii.device, pcii.function);
	fd = open (fname, O_WRONLY | O_CREAT, 0666);
	if (fd < 0) return;

	/* apparently max. 32kb may be written at once;
	 * the ROM size is a multiple of that anyway. */
	for (cnt = 0; (cnt < size); cnt += 32768)
		write (fd, ((void *)(((uint8 *)rom) + cnt)), 32768);
	close (fd);
}
/* Nonzero when CRTC1 has a pending vertical blank interrupt. */
static int
caused_vbi_crtc1(vuint32 * regs)
{
	return (NV_REG32(NV32_CRTC_INTS) & 0x00000001);
}

/* Acknowledge (reset) a pending CRTC1 vblank interrupt. */
static void
clear_vbi_crtc1(vuint32 * regs)
{
	NV_REG32(NV32_CRTC_INTS) = 0x00000001;
}

/* Enable CRTC1 vblank interrupts: ack any pending one, unmask the source,
 * and switch on the chip's master interrupt enable. */
static void
enable_vbi_crtc1(vuint32 * regs)
{
	/* clear pending vblank interrupt */
	NV_REG32(NV32_CRTC_INTS) = 0x00000001;
	/* enable the vblank interrupt source */
	NV_REG32(NV32_CRTC_INTE) |= 0x00000001;
	/* enable the master interrupt */
	NV_REG32(NV32_MAIN_INTE) = 0x00000001;
}

/* Disable CRTC1 vblank interrupts (master enable is left alone so the
 * other head keeps working). */
static void
disable_vbi_crtc1(vuint32 * regs)
{
	/* mask the vblank interrupt source, then ack anything pending */
	NV_REG32(NV32_CRTC_INTE) &= 0xfffffffe;
	NV_REG32(NV32_CRTC_INTS) = 0x00000001;
}

/* Nonzero when CRTC2 has a pending vertical blank interrupt. */
static int
caused_vbi_crtc2(vuint32 * regs)
{
	return (NV_REG32(NV32_CRTC2_INTS) & 0x00000001);
}

/* Acknowledge (reset) a pending CRTC2 vblank interrupt. */
static void
clear_vbi_crtc2(vuint32 * regs)
{
	NV_REG32(NV32_CRTC2_INTS) = 0x00000001;
}

/* Enable CRTC2 vblank interrupts: ack pending, unmask source, enable master. */
static void
enable_vbi_crtc2(vuint32 * regs)
{
	/* clear pending vblank interrupt */
	NV_REG32(NV32_CRTC2_INTS) = 0x00000001;
	/* enable the vblank interrupt source */
	NV_REG32(NV32_CRTC2_INTE) |= 0x00000001;
	/* enable the master interrupt */
	NV_REG32(NV32_MAIN_INTE) = 0x00000001;
}

/* Disable CRTC2 vblank interrupts (master enable left alone). */
static void
disable_vbi_crtc2(vuint32 * regs)
{
	NV_REG32(NV32_CRTC2_INTE) &= 0xfffffffe;
	NV_REG32(NV32_CRTC2_INTS) = 0x00000001;
}

/* Disable and acknowledge vblank interrupts on both heads, then shut off
 * the chip's master interrupt enable. */
static void
disable_vbi_all(vuint32 * regs)
{
	NV_REG32(NV32_CRTC_INTE) &= 0xfffffffe;
	NV_REG32(NV32_CRTC_INTS) = 0x00000001;
	NV_REG32(NV32_CRTC2_INTE) &= 0xfffffffe;
	NV_REG32(NV32_CRTC2_INTS) = 0x00000001;
	NV_REG32(NV32_MAIN_INTE) = 0x00000000;
}
/* Map the card's MMIO registers, ROM and framebuffer into kernel space and
 * record the mappings in the shared_info block.
 * Returns the framebuffer area id (>= 0) on success; on failure all areas
 * created here are deleted again and a negative error is returned.
 * (Fix: the stripped comment fragments that were left in the body were
 * syntax errors; restored as proper comments.) */
static status_t
map_device(device_info *di)
{
	char buffer[B_OS_NAME_LENGTH];
	shared_info *si = di->si;
	uint32 tmpUlong, tmpROMshadow;
	pci_info *pcii = &(di->pcii);
	phys_addr_t physicalAddress;
	system_info sysinfo;
	uint8* rom_temp;
	area_id rom_area = -1;
	/* BAR indices: BAR0 = registers, BAR1 = framebuffer */
	int registers = 0;
	int frame_buffer = 1;

	CALLED();

	/* enable memory-mapped I/O and bus-mastering, disable ISA I/O access */
	tmpUlong = get_pci(PCI_command, 2);
	tmpUlong |= PCI_command_memory;
	tmpUlong |= PCI_command_master;
	tmpUlong &= ~PCI_command_io;
	set_pci(PCI_command, 2, tmpUlong);

	get_system_info(&sysinfo);
	/* NOTE(review): the clone bugfix is permanently disabled here; the
	 * condition presumably once checked CPU/platform specifics — confirm
	 * before removing the dead branch. */
	if (0) {
		si->use_clone_bugfix = 1;
	} else {
		si->use_clone_bugfix = 0;
	}

	/* compose a name for the register area */
	sprintf(buffer, DEVICE_FORMAT " regs",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	if ((di->pcii.u.h0.base_register_flags[registers] & PCI_address_type)
			== PCI_address_type_64) {
		TRACE("registers is 64 bit\n");
	} else {
		TRACE("registers is 32 bit\n");
	}

	/* map the registers */
	si->regs_area = map_physical_memory(
		buffer,
		di->pcii.u.h0.base_registers_pci[registers],
		di->pcii.u.h0.base_register_sizes[registers],
		B_ANY_KERNEL_ADDRESS,
		B_CLONEABLE_AREA | B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void **)&(di->regs));
	si->clone_bugfix_regs = (uint32 *) di->regs;

	/* if mapping the registers failed, pass on the error */
	if (si->regs_area < 0) return si->regs_area;

	/* compose a name for the ROM area */
	sprintf(buffer, DEVICE_FORMAT " rom",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	/* disable ROM shadowing so we read the chip's real contents;
	 * 'don't touch': (confirmed) NV04, NV05, NV05-M64, NV11 all shutoff otherwise.
	 * NV18, NV28 and NV34 keep working.
	 * confirmed NV28 and NV34 to use upper part of shadowed ROM for scratch purposes,
	 * however the actual ROM content (so the used part) is intact (confirmed). */
	tmpROMshadow = get_pci(NVCFG_ROMSHADOW, 4);
	set_pci(NVCFG_ROMSHADOW, 4, 0);

	/* get the memory-mapped ROM base address (PCI standard) */
	tmpUlong = get_pci(PCI_rom_base, 4);
	if (tmpUlong) {
		/* the ROM was assigned an address: enable ROM decoding (bit 0) */
		tmpUlong |= 0x00000001;
		set_pci(PCI_rom_base, 4, tmpUlong);

		rom_area = map_physical_memory(
			buffer,
			di->pcii.u.h0.rom_base_pci,
			di->pcii.u.h0.rom_size,
			B_ANY_KERNEL_ADDRESS,
			B_KERNEL_READ_AREA,
			(void **)&(rom_temp)
		);

		/* check the expansion-ROM signature (may be absent on laptops) */
		if (rom_area >= 0) {
			if ((rom_temp[0] != 0x55) || (rom_temp[1] != 0xaa)) {
				/* apparently no ROM is mapped here: fall back below */
				delete_area(rom_area);
				rom_area = -1;
				tmpUlong = 0x00000000;
			}
		} else {
			/* mapping failed: fall back below */
			tmpUlong = 0x00000000;
		}
	}

	if (!tmpUlong) {
		/* no usable PCI ROM BAR: fetch the ROM from the ISA legacy map */
		rom_area = map_physical_memory(buffer, 0x000c0000,
			65536, B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA, (void **)&(rom_temp));
	}

	/* if mapping the ROM failed, clean up and pass on the error */
	if (rom_area < 0) {
		delete_area(si->regs_area);
		si->regs_area = -1;
		return rom_area;
	}

	/* dump the ROM to a file if selected in nvidia.settings
	 * (ROM always fits in 64Kb: checked TNT1 - FX5950) */
	if (sSettings.dumprom)
		dumprom(rom_temp, 65536, di->pcii);

	/* keep a copy of the ROM for later use by the accelerant */
	memcpy(si->rom_mirror, rom_temp, 65536);

	/* disable ROM decoding again (PCI standard) and delete the area */
	tmpUlong = get_pci(PCI_rom_base, 4);
	tmpUlong &= 0xfffffffe;
	set_pci(PCI_rom_base, 4, tmpUlong);
	delete_area(rom_area);

	/* restore the original ROM shadowing setting */
	set_pci(NVCFG_ROMSHADOW, 4, tmpROMshadow);

	/* compose a name for the framebuffer area */
	sprintf(buffer, DEVICE_FORMAT " framebuffer",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	physicalAddress = di->pcii.u.h0.base_registers_pci[frame_buffer];
	if ((di->pcii.u.h0.base_register_flags[frame_buffer] & PCI_address_type)
			== PCI_address_type_64) {
		TRACE("framebuffer is 64 bit\n");
		/* the upper 32 address bits live in the next BAR */
		physicalAddress
			|= (uint64)di->pcii.u.h0.base_registers_pci[frame_buffer + 1] << 32;
	} else {
		TRACE("framebuffer is 32 bit\n");
	}

	/* map the framebuffer, preferring write-combining */
	si->fb_area = map_physical_memory(buffer,
		physicalAddress,
		di->pcii.u.h0.base_register_sizes[frame_buffer],
		B_ANY_KERNEL_BLOCK_ADDRESS | B_WRITE_COMBINING_MEMORY,
		B_READ_AREA | B_WRITE_AREA | B_CLONEABLE_AREA,
		&(si->framebuffer));
	if (si->fb_area < 0) {
		/* retry without write-combining */
		si->fb_area = map_physical_memory(buffer,
			physicalAddress,
			di->pcii.u.h0.base_register_sizes[frame_buffer],
			B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA | B_CLONEABLE_AREA,
			&(si->framebuffer));
	}

	/* on error, delete our other area and pass the error on */
	if (si->fb_area < 0) {
		delete_area(si->regs_area);
		si->regs_area = -1;
		return si->fb_area;
	}

	/* remember the physical address of the framebuffer */
	si->framebuffer_pci = (void *) physicalAddress;

	/* note the size of the mapped aperture, so we
	 * don't attempt to adress more later on */
	si->ps.memory_size = di->pcii.u.h0.base_register_sizes[frame_buffer];

	/* hand the current settings to the accelerant via shared info */
	si->settings = sSettings;

	if (si->fb_area >= 0) {
		TRACE("framebuffer mapped OK\n");
	} else {
		TRACE("framebuffer mapping failed!\n");
	}
	return si->fb_area;
}
/* Tear down the areas map_device() created and disable the card's PCI
 * memory/IO response. Safe to call when areas were never mapped (ids < 0). */
static void
unmap_device(device_info *di)
{
	shared_info *si = di->si;
	uint32 tmpUlong;
	pci_info *pcii = &(di->pcii);

	CALLED();

	/* disable memory-mapped I/O and ISA I/O response (clear command bits 0-1) */
	tmpUlong = get_pci(PCI_command, 4);
	tmpUlong &= 0xfffffffc;
	set_pci(PCI_command, 4, tmpUlong);
	/* delete the areas we created */
	if (si->regs_area >= 0)
		delete_area(si->regs_area);
	if (si->fb_area >= 0)
		delete_area(si->fb_area);
	si->regs_area = si->fb_area = -1;
	si->framebuffer = NULL;
	di->regs = NULL;
}
static void
probe_devices(void)
{
uint32 pci_index = 0;
uint32 count = 0;
device_info *di = pd->di;
char tmp_name[B_OS_NAME_LENGTH];
CALLED();
while (count < MAX_DEVICES
&& (*pci_bus->get_nth_pci_info)(pci_index, &(di->pcii)) == B_OK) {
int vendor = 0;
while (SupportedDevices[vendor].vendor) {
if (SupportedDevices[vendor].vendor == di->pcii.vendor_id) {
uint16 *devices = SupportedDevices[vendor].devices;
while (*devices) {
if (*devices == di->pcii.device_id ) {
sprintf(tmp_name, DEVICE_FORMAT,
di->pcii.vendor_id, di->pcii.device_id,
di->pcii.bus, di->pcii.device, di->pcii.function);
* hierarchy folder, so the system will use it as primary adaptor if requested
* via nvidia.settings. */
if (strcmp(tmp_name, sSettings.primary) == 0)
sprintf(tmp_name, "-%s", sSettings.primary);
sprintf(di->name, "graphics/%s", tmp_name);
pd->device_names[count] = di->name;
di->is_open = 0;
di->shared_area = -1;
di->si = NULL;
di++;
count++;
goto next_device;
}
devices++;
}
}
vendor++;
}
next_device:
pci_index++;
}
pd->count = count;
pd->device_names[pd->count] = NULL;
}
/* Wake every thread blocked on the vblank semaphore (if any).
 * Returns B_INVOKE_SCHEDULER when threads were released so the kernel
 * reschedules immediately, B_HANDLED_INTERRUPT otherwise. */
static uint32
thread_interrupt_work(int32 *flags, vuint32 *regs, shared_info *si)
{
	int32 blocked = 0;

	/* no semaphore set up: nothing to release */
	if (si->vblank < 0)
		return B_HANDLED_INTERRUPT;

	/* a negative count means that many threads are waiting on the sem */
	if (get_sem_count(si->vblank, &blocked) != B_OK || blocked >= 0)
		return B_HANDLED_INTERRUPT;

	release_sem_etc(si->vblank, -blocked, B_DO_NOT_RESCHEDULE);
	return B_INVOKE_SCHEDULER;
}
/* Interrupt handler: check whether one of our CRTCs raised a vertical blank
 * interrupt, acknowledge it, and wake threads waiting on the vblank
 * semaphore. Guards against reentrancy via the SKD_HANDLER_INSTALLED flag. */
static int32
nv_interrupt(void *data)
{
	int32 handled = B_UNHANDLED_INTERRUPT;
	device_info *di = (device_info *)data;
	shared_info *si = di->si;
	int32 *flags = (int32*)&(si->flags);
	vuint32 *regs;

	/* is someone already in here? if so, bail out */
	if (atomic_or(flags, SKD_HANDLER_INSTALLED) & SKD_HANDLER_INSTALLED) goto exit0;

	regs = di->regs;

	/* was it a vblank interrupt from one of our heads? */
	if (si->ps.secondary_head) {
		/* dualhead card: either head may have raised the interrupt */
		if (caused_vbi_crtc1(regs) || caused_vbi_crtc2(regs)) {
			/* acknowledge both heads */
			clear_vbi_crtc1(regs);
			clear_vbi_crtc2(regs);
			/* release waiting threads */
			handled = thread_interrupt_work(flags, regs, si);
		}
	} else {
		if (caused_vbi_crtc1(regs)) {
			clear_vbi_crtc1(regs);
			handled = thread_interrupt_work(flags, regs, si);
		}
	}

	/* note that we're no longer in the handler */
	atomic_and(flags, ~SKD_HANDLER_INSTALLED);

exit0:
	return handled;
}
/* Open a published device. On first open this creates the shared_info area,
 * allocates and maps the DMA command buffer, maps the card, and installs the
 * vblank interrupt handler. Later opens only bump the reference count.
 * The cookie handed back is the device_info pointer.
 * (Fix: the stripped comment fragments in the body were syntax errors;
 * restored as proper comments.) */
static status_t
open_hook(const char* name, uint32 flags, void** cookie)
{
	int32 index = 0;
	device_info *di;
	shared_info *si;
	thread_id thid;
	thread_info thinfo;
	status_t result = B_OK;
	char shared_name[B_OS_NAME_LENGTH];
	physical_entry map[1];
	size_t net_buf_size;
	void *unaligned_dma_buffer;
	uint32 mem_size;

	CALLED();

	/* find the device name in the list of devices;
	 * we're never passed a name we didn't publish */
	while (pd->device_names[index]
		&& (strcmp(name, pd->device_names[index]) != 0))
		index++;

	di = &(pd->di[index]);

	/* make sure no one else has write access to the common data */
	AQUIRE_BEN(pd->kernel);

	/* already open? just mark it open another time */
	if (di->is_open) {
		goto mark_as_open;
	}

	/* create the shared_info area */
	sprintf(shared_name, DEVICE_FORMAT " shared",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);
	/* no user-space permissions: apps must not access this directly */
	di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS,
		((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA);
	if (di->shared_area < 0) {
		result = di->shared_area;
		goto done;
	}

	si = di->si;

	/* allocate a buffer twice the net size so an aligned start can be found */
	net_buf_size = ((1 * 1024 * 1024) + (B_PAGE_SIZE-1)) & ~(B_PAGE_SIZE-1);
	si->unaligned_dma_area =
		create_area("NV DMA cmd buffer",
			(void **)&unaligned_dma_buffer,
			B_ANY_KERNEL_ADDRESS,
			2 * net_buf_size,
			B_32_BIT_CONTIGUOUS,
			B_CLONEABLE_AREA | B_READ_AREA | B_WRITE_AREA);
	if (si->unaligned_dma_area < 0)
	{
		result = si->unaligned_dma_area;
		goto free_shared;
	}
	/* determine the aligned physical start of the buffer: its contents will be
	 * fed into the GPU's engine later on. Get an aligned adress so we can use MTRR-WC
	 * even on older CPU's. */
	get_memory_map(unaligned_dma_buffer, B_PAGE_SIZE, map, 1);
	si->dma_buffer_pci = (void*)
		((map[0].address + net_buf_size - 1) & ~(net_buf_size - 1));
	/* map the aligned part of the buffer, preferring write-combining */
	si->dma_area = map_physical_memory(
		"NV aligned DMA cmd buffer", (addr_t)si->dma_buffer_pci, net_buf_size,
		B_ANY_KERNEL_BLOCK_ADDRESS | B_WRITE_COMBINING_MEMORY,
		B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	if (si->dma_area < 0) {
		/* retry without write-combining */
		si->dma_area = map_physical_memory("NV aligned DMA cmd buffer",
			(addr_t)si->dma_buffer_pci, net_buf_size,
			B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	}
	if (si->dma_area < 0)
	{
		result = si->dma_area;
		goto free_shared_and_uadma;
	}

	/* save the PCI identification in the shared info */
	si->vendor_id = di->pcii.vendor_id;
	si->device_id = di->pcii.device_id;
	si->revision = di->pcii.revision;
	si->bus = di->pcii.bus;
	si->device = di->pcii.device;
	si->function = di->pcii.function;

	si->accelerant_in_use = false;
	/* preset singlehead mode so an early INT routine call won't
	 * wrongly identify the INT request coming from us! */
	si->ps.secondary_head = false;

	/* map the device */
	result = map_device(di);
	if (result < 0) goto free_shared_and_alldma;
	result = B_OK;

	/* restrict the usable memory on integrated GPUs that share system RAM:
	 * unified memory architecture (UMA) */
	switch ((((uint32)(si->device_id)) << 16) | si->vendor_id)
	{
	case 0x01a010de: /* GeForce2 integrated GPU (presumed — confirm) */
		/* host bridge function 1, register 0x7c holds the RAM size */
		mem_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x7c, 4)) & 0x000007c0) >> 6) + 1);
		if (si->ps.memory_size > mem_size) si->ps.memory_size = mem_size;
		/* keep the top 64kB reserved */
		si->ps.memory_size -= (64 * 1024);
		break;
	case 0x01f010de: /* GeForce4 MX integrated GPU (presumed — confirm) */
		/* host bridge function 1, register 0x84 holds the RAM size */
		mem_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x84, 4)) & 0x000007f0) >> 4) + 1);
		if (si->ps.memory_size > mem_size) si->ps.memory_size = mem_size;
		si->ps.memory_size -= (64 * 1024);
		break;
	default:
		/* all other cards: leave the detected size for the
		 * accelerant. */
		break;
	}

	/* disable all interrupts using the kernel driver */
	disable_vbi_all(di->regs);

	/* preset: no interrupt assigned */
	si->ps.int_assigned = false;

	/* create a semaphore for vertical blank management */
	si->vblank = create_sem(0, di->name);
	if (si->vblank < 0) goto mark_as_open;

	/* change the owner of the semaphore to the opener's team */
	thid = find_thread(NULL);
	get_thread_info(thid, &thinfo);
	set_sem_owner(si->vblank, thinfo.team);

	/* only set up interrupts when a valid interrupt line is assigned */
	if ((di->pcii.u.h0.interrupt_pin == 0x00) ||
		(di->pcii.u.h0.interrupt_line == 0xff) ||
		(di->pcii.u.h0.interrupt_line <= 0x02))
	{
		/* delete the semaphore as it won't be used */
		delete_sem(si->vblank);
		si->vblank = -1;
	}
	else
	{
		/* install our interrupt handler */
		result = install_io_interrupt_handler(di->pcii.u.h0.interrupt_line, nv_interrupt, (void *)di, 0);
		if (result != B_OK)
		{
			/* delete the semaphore as it won't be used */
			delete_sem(si->vblank);
			si->vblank = -1;
		}
		else
		{
			/* inform the accelerant(s) that INT-related functions work */
			si->ps.int_assigned = true;
		}
	}

mark_as_open:
	/* mark the device open and hand the cookie to the opener */
	di->is_open++;
	*cookie = di;
	TRACE("open_hook: device is open\n");
	goto done;

free_shared_and_alldma:
	/* clean up the aligned DMA area */
	delete_area(si->dma_area);
	si->dma_area = -1;
	si->dma_buffer = NULL;

free_shared_and_uadma:
	/* clean up the unaligned DMA area */
	delete_area(si->unaligned_dma_area);
	si->unaligned_dma_area = -1;
	si->dma_buffer_pci = NULL;

free_shared:
	/* clean up the shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;
	TRACE("open_hook: device is freed\n");

done:
	/* end of critical section */
	RELEASE_BEN(pd->kernel);
	return result;
}
/* Reading from the device is not supported. */
static status_t
read_hook(void* dev, off_t pos, void* buf, size_t* len)
{
	*len = 0;
	return B_NOT_ALLOWED;
}

/* Writing to the device is not supported. */
static status_t
write_hook(void* dev, off_t pos, const void* buf, size_t* len)
{
	*len = 0;
	return B_NOT_ALLOWED;
}

/* Nothing to do on close: actual teardown happens in free_hook() when the
 * last reference is released. */
static status_t
close_hook(void* dev)
{
	CALLED();
	return B_NO_ERROR;
}
/* Release one reference to the device. On the last release, disable and
 * remove interrupts, unmap the card, and delete all areas created in
 * open_hook(). Always returns B_OK.
 * (Fix: the stripped comment fragment before delete_sem() was a syntax
 * error; restored as a proper comment.) */
static status_t
free_hook(void* dev)
{
	device_info *di = (device_info *)dev;
	shared_info *si = di->si;
	vuint32 *regs = di->regs;

	CALLED();

	/* lock the driver */
	AQUIRE_BEN(pd->kernel);

	/* still open elsewhere: only drop the reference */
	if (di->is_open > 1)
		goto unlock_and_exit;

	/* disable all interrupts using the kernel driver */
	disable_vbi_all(regs);

	if (si->ps.int_assigned) {
		/* remove the interrupt handler */
		remove_io_interrupt_handler(di->pcii.u.h0.interrupt_line, nv_interrupt, di);

		/* delete the semaphore, ignoring any error (the client
		 * team may have died on us) */
		delete_sem(si->vblank);
		si->vblank = -1;
	}

	/* free the registers and framebuffer areas */
	unmap_device(di);

	/* clean up the aligned DMA area */
	delete_area(si->dma_area);
	si->dma_area = -1;
	si->dma_buffer = NULL;

	/* clean up the unaligned DMA area */
	delete_area(si->unaligned_dma_area);
	si->unaligned_dma_area = -1;
	si->dma_buffer_pci = NULL;

	/* clean up the shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

unlock_and_exit:
	/* mark the device available again and unlock the driver */
	di->is_open--;
	RELEASE_BEN(pd->kernel);
	return B_OK;
}
/* ioctl hook: implements the private protocol between the accelerant and
 * this kernel driver. 'buf' is a userland pointer: every transfer goes
 * through user_memcpy()/user_strlcpy(), and private requests are only
 * honoured when they carry NV_PRIVATE_DATA_MAGIC.
 * Unknown codes return B_DEV_INVALID_IOCTL.
 * (Fix: the stripped comment fragments in the ISA cases were syntax errors;
 * restored as proper comments. Added an explicit default case.) */
static status_t
control_hook(void* dev, uint32 msg, void *buf, size_t len)
{
	device_info *di = (device_info *)dev;
	status_t result = B_DEV_INVALID_IOCTL;
	uint32 tmpUlong;

	switch (msg) {
		/* the only PUBLIC ioctl */
		case B_GET_ACCELERANT_SIGNATURE: {
			TRACE("return signature\n");
			if (user_strlcpy((char* )buf, sSettings.accelerant, len) < B_OK)
				return B_BAD_ADDRESS;
			result = B_OK;
			break;
		}

		/* PRIVATE ioctls from here on */
		case NV_GET_PRIVATE_DATA: {
			TRACE("return private data\n");
			nv_get_private_data gpd;
			if (user_memcpy(&gpd, buf, sizeof(nv_get_private_data)) < B_OK)
				return B_BAD_ADDRESS;
			if (gpd.magic == NV_PRIVATE_DATA_MAGIC) {
				gpd.shared_info_area = di->shared_area;
				result = user_memcpy(buf, &gpd, sizeof(nv_get_private_data));
			}
			break;
		}
		case NV_GET_PCI: {
			/* read a PCI config space value for the accelerant */
			nv_get_set_pci gsp;
			if (user_memcpy(&gsp, buf, sizeof(nv_get_set_pci)) < B_OK)
				return B_BAD_ADDRESS;
			if (gsp.magic == NV_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);
				gsp.value = get_pci(gsp.offset, gsp.size);
				result = user_memcpy(buf, &gsp, sizeof(nv_get_set_pci));
			}
			break;
		}
		case NV_SET_PCI: {
			/* write a PCI config space value on behalf of the accelerant */
			nv_get_set_pci gsp;
			if (user_memcpy(&gsp, buf, sizeof(nv_get_set_pci)) < B_OK)
				return B_BAD_ADDRESS;
			if (gsp.magic == NV_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);
				set_pci(gsp.offset, gsp.size, gsp.value);
				result = B_OK;
			}
			break;
		}
		case NV_DEVICE_NAME: {
			TRACE("return device name\n");
			nv_device_name dn;
			if (user_memcpy(&dn, buf, sizeof(nv_device_name)) < B_OK)
				return B_BAD_ADDRESS;
			if (dn.magic == NV_PRIVATE_DATA_MAGIC) {
				if (user_strlcpy(dn.name, di->name, B_OS_NAME_LENGTH) < B_OK)
					return B_BAD_ADDRESS;
				result = B_OK;
			}
			break;
		}
		case NV_RUN_INTERRUPTS: {
			/* enable/disable the vblank interrupt on the selected head */
			nv_set_vblank_int vi;
			if (user_memcpy(&vi, buf, sizeof(nv_set_vblank_int)) < B_OK)
				return B_BAD_ADDRESS;
			if (vi.magic == NV_PRIVATE_DATA_MAGIC) {
				vuint32 *regs = di->regs;
				if (!(vi.crtc)) {
					if (vi.do_it) {
						enable_vbi_crtc1(regs);
					} else {
						disable_vbi_crtc1(regs);
					}
				} else {
					if (vi.do_it) {
						enable_vbi_crtc2(regs);
					} else {
						disable_vbi_crtc2(regs);
					}
				}
				result = B_OK;
			}
			break;
		}
		case NV_GET_NTH_AGP_INFO: {
			/* return info on the nth AGP device, if the AGP busmanager exists */
			nv_nth_agp_info nai;
			if (user_memcpy(&nai, buf, sizeof(nv_nth_agp_info)) < B_OK)
				return B_BAD_ADDRESS;
			if (nai.magic == NV_PRIVATE_DATA_MAGIC) {
				nai.exist = false;
				nai.agp_bus = false;
				if (agp_bus) {
					nai.agp_bus = true;
					if ((*agp_bus->get_nth_agp_info)(nai.index, &(nai.agpi)) == B_NO_ERROR) {
						nai.exist = true;
					}
				}
				result = user_memcpy(buf, &nai, sizeof(nv_nth_agp_info));
			}
			break;
		}
		case NV_ENABLE_AGP: {
			/* program the AGP mode via the AGP busmanager, if it exists */
			nv_cmd_agp nca;
			if (user_memcpy(&nca, buf, sizeof(nv_cmd_agp)) < B_OK)
				return B_BAD_ADDRESS;
			if (nca.magic == NV_PRIVATE_DATA_MAGIC) {
				if (agp_bus) {
					nca.agp_bus = true;
					nca.cmd = agp_bus->set_agp_mode(nca.cmd);
				} else {
					nca.agp_bus = false;
					nca.cmd = 0;
				}
				result = user_memcpy(buf, &nca, sizeof(nv_cmd_agp));
			}
			break;
		}
		case NV_ISA_OUT: {
			nv_in_out_isa io_isa;
			if (user_memcpy(&io_isa, buf, sizeof(nv_in_out_isa)) < B_OK)
				return B_BAD_ADDRESS;
			if (io_isa.magic == NV_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);

				/* lock the driver:
				 * no other graphics card may have ISA I/O enabled when we enter */
				AQUIRE_BEN(pd->kernel);
				/* enable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong |= PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);
				if (io_isa.size == 1)
					isa_bus->write_io_8(io_isa.adress, (uint8)io_isa.data);
				else
					isa_bus->write_io_16(io_isa.adress, io_isa.data);
				result = B_OK;
				/* disable ISA I/O access again */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong &= ~PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);
				/* end of critical section */
				RELEASE_BEN(pd->kernel);
			}
			break;
		}
		case NV_ISA_IN: {
			nv_in_out_isa io_isa;
			if (user_memcpy(&io_isa, buf, sizeof(nv_in_out_isa)) < B_OK)
				return B_BAD_ADDRESS;
			if (io_isa.magic == NV_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);

				/* lock the driver:
				 * no other graphics card may have ISA I/O enabled when we enter */
				AQUIRE_BEN(pd->kernel);
				/* enable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong |= PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);
				if (io_isa.size == 1)
					io_isa.data = isa_bus->read_io_8(io_isa.adress);
				else
					io_isa.data = isa_bus->read_io_16(io_isa.adress);
				result = user_memcpy(buf, &io_isa, sizeof(nv_in_out_isa));
				/* disable ISA I/O access again */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong &= ~PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);
				/* end of critical section */
				RELEASE_BEN(pd->kernel);
			}
			break;
		}
		default:
			/* unknown ioctl: result stays B_DEV_INVALID_IOCTL */
			break;
	}
	return result;
}
/* Check whether a supported card is present on the PCI bus. Both bus
 * modules acquired here are released again before returning.
 * (Fix: the ISA bus module was acquired but never put back, leaking a
 * module reference on every probe — now released alongside the PCI module.) */
status_t
init_hardware(void)
{
	long index = 0;
	pci_info pcii;
	bool found = false;

	CALLED();

	/* choke if we can't find the PCI bus */
	if (get_module(B_PCI_MODULE_NAME, (module_info **)&pci_bus) != B_OK)
		return B_ERROR;

	/* choke if we can't find the ISA bus */
	if (get_module(B_ISA_MODULE_NAME, (module_info **)&isa_bus) != B_OK)
	{
		put_module(B_PCI_MODULE_NAME);
		return B_ERROR;
	}

	/* while there are more PCI devices... */
	while ((*pci_bus->get_nth_pci_info)(index, &pcii) == B_NO_ERROR) {
		int vendor = 0;

		/* ...check each supported vendor... */
		while (SupportedDevices[vendor].vendor) {
			if (SupportedDevices[vendor].vendor == pcii.vendor_id) {
				uint16 *devices = SupportedDevices[vendor].devices;

				/* ...and each of that vendor's supported devices */
				while (*devices) {
					if (*devices == pcii.device_id) {
						found = true;
						goto done;
					}
					devices++;
				}
			}
			vendor++;
		}
		index++;
	}

done:
	if (found) {
		TRACE ("init_hardware: found device\n");
	} else {
		TRACE ("init_hardware: no supported device found\n");
	}

	/* put both modules away again */
	put_module(B_ISA_MODULE_NAME);
	put_module(B_PCI_MODULE_NAME);

	return found ? B_OK : B_ERROR;
}
/* Driver entry point: parse nvidia.settings, acquire the bus modules,
 * allocate the global device table and probe for supported cards.
 * Returns B_OK on success; on failure all acquired resources are released. */
status_t
init_driver(void)
{
	void *settings;

	CALLED();

	/* get driver/accelerant settings (may legitimately be absent) */
	settings = load_driver_settings(DRIVER_PREFIX ".settings");
	if (settings != NULL) {
		const char *item;
		char *end;
		uint32 value;

		TRACE("init_driver: nvidia.settings loaded\n");

		/* overrule the accelerant binary name if requested */
		item = get_driver_parameter(settings, "accelerant", "", "");
		if (item[0] && strlen(item) < sizeof(sSettings.accelerant) - 1)
			strcpy (sSettings.accelerant, item);

		/* primary adaptor selection */
		item = get_driver_parameter(settings, "primary", "", "");
		if (item[0] && strlen(item) < sizeof(sSettings.primary) - 1)
			strcpy(sSettings.primary, item);

		/* ROM dump requested? */
		sSettings.dumprom = get_driver_boolean_parameter(settings,
			"dumprom", false, false);
		if (sSettings.dumprom) {
			TRACE("dumprom requested\n");
		} else {
			TRACE("no dumprom requested\n");
		}

		/* log mask (only taken when the whole string parses as a number) */
		item = get_driver_parameter(settings, "logmask",
			"0x00000000", "0x00000000");
		value = strtoul(item, &end, 0);
		if (*end == '\0')
			sSettings.logmask = value;

		/* memory size override */
		item = get_driver_parameter(settings, "memory", "0", "0");
		value = strtoul(item, &end, 0);
		if (*end == '\0')
			sSettings.memory = value;

		/* TV output mode */
		item = get_driver_parameter(settings, "tv_output", "0", "0");
		value = strtoul(item, &end, 0);
		if (*end == '\0')
			sSettings.tv_output = value;

		/* boolean feature switches */
		sSettings.hardcursor = get_driver_boolean_parameter(settings,
			"hardcursor", true, true);
		sSettings.usebios = get_driver_boolean_parameter(settings,
			"usebios", true, true);
		sSettings.switchhead = get_driver_boolean_parameter(settings,
			"switchhead", false, false);
		sSettings.force_pci = get_driver_boolean_parameter(settings,
			"force_pci", false, false);
		sSettings.unhide_fw = get_driver_boolean_parameter(settings,
			"unhide_fw", false, false);
		sSettings.pgm_panel = get_driver_boolean_parameter(settings,
			"pgm_panel", false, false);
		sSettings.dma_acc = get_driver_boolean_parameter(settings,
			"dma_acc", true, true);
		sSettings.vga_on_tv = get_driver_boolean_parameter(settings,
			"vga_on_tv", false, false);
		sSettings.force_sync = get_driver_boolean_parameter(settings,
			"force_sync", false, false);
		sSettings.force_ws = get_driver_boolean_parameter(settings,
			"force_ws", false, false);
		sSettings.block_acc = get_driver_boolean_parameter(settings,
			"block_acc", false, false);
		sSettings.check_edid = get_driver_boolean_parameter(settings,
			"check_edid", true, true);

		/* clock overrides */
		item = get_driver_parameter(settings, "gpu_clk", "0", "0");
		value = strtoul(item, &end, 0);
		if (*end == '\0')
			sSettings.gpu_clk = value;
		item = get_driver_parameter(settings, "ram_clk", "0", "0");
		value = strtoul(item, &end, 0);
		if (*end == '\0')
			sSettings.ram_clk = value;

		unload_driver_settings(settings);
	}

	/* choke if we can't find the PCI bus */
	if (get_module(B_PCI_MODULE_NAME, (module_info **)&pci_bus) != B_OK)
		return B_ERROR;

	/* choke if we can't find the ISA bus */
	if (get_module(B_ISA_MODULE_NAME, (module_info **)&isa_bus) != B_OK) {
		put_module(B_PCI_MODULE_NAME);
		return B_ERROR;
	}

	/* AGP is optional: agp_bus stays NULL when the module is missing */
	get_module(B_AGP_GART_MODULE_NAME, (module_info **)&agp_bus);

	/* allocate the global device table */
	pd = (DeviceData *)calloc(1, sizeof(DeviceData));
	if (!pd) {
		put_module(B_PCI_MODULE_NAME);
		return B_ERROR;
	}

	/* initialize the driver benaphore */
	INIT_BEN(pd->kernel);

	/* find all of our supported devices */
	probe_devices();

	TRACE("init_driver: completed OK\n");
	return B_OK;
}
/* Return the NULL-terminated list of device names built by probe_devices(). */
const char **
publish_devices(void)
{
	CALLED();
	return (const char **)pd->device_names;
}
/* Return our hook table when 'name' matches one of the published device
 * names; NULL for anything we didn't publish. */
device_hooks *
find_device(const char *name)
{
	int idx;

	for (idx = 0; pd->device_names[idx] != NULL; idx++) {
		if (strcmp(pd->device_names[idx], name) == 0)
			return &graphics_device_hooks;
	}
	return NULL;
}
/* Final driver teardown: free the global device table and release the bus
 * modules acquired in init_driver(). */
void
uninit_driver(void)
{
	CALLED();

	/* free the driver data */
	DELETE_BEN(pd->kernel);
	free(pd);
	pd = NULL;

	/* put the bus modules away */
	put_module(B_PCI_MODULE_NAME);
	put_module(B_ISA_MODULE_NAME);
	/* the AGP module is optional, only release it when we got it */
	if (agp_bus)
		put_module(B_AGP_GART_MODULE_NAME);
}