author    | Jonathan Gray <jsg@cvs.openbsd.org> | 2013-12-08 07:54:07 +0000
committer | Jonathan Gray <jsg@cvs.openbsd.org> | 2013-12-08 07:54:07 +0000
commit    | 675367018ae5dd103b7a3e95525139dc88dfd463 (patch)
tree      | e3124805785a471ea1eff1de7340bc0e4993a689 /sys/dev
parent    | 3bfd7ffff491659d31f0da64750af0a0485bef71 (diff)
add static back to the ttm functions
ok kettenis@
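For context, this reverses an earlier local change: the OpenBSD port had dropped `static` from file-local TTM helpers and carried separate forward prototypes, and this commit restores the upstream layout of static definitions with the qualifier on the same line as the return type. A minimal sketch of the pattern is below; `ttm_example_bind` and the opaque struct tags are hypothetical stand-ins for illustration, not symbols from the tree.

```c
/* Illustrative only: ttm_example_bind and these opaque struct tags are
 * made-up stand-ins for the pattern, not code from sys/dev/pci/drm/ttm. */
struct ttm_tt;
struct ttm_mem_reg;

/*
 * Before: a file-local prototype plus an external, KNF-style definition.
 *
 *     int ttm_example_bind(struct ttm_tt *, struct ttm_mem_reg *);
 *
 *     int
 *     ttm_example_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *     {
 *             return 0;
 *     }
 */

/*
 * After: the prototype is removed and the definition is marked static,
 * keeping the symbol local to the compilation unit and matching the
 * upstream Linux TTM style.
 */
static int ttm_example_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	(void)ttm;
	(void)bo_mem;
	return 0;
}

int
main(void)
{
	/* Reference the helper so the static function is used. */
	return ttm_example_bind((struct ttm_tt *)0, (struct ttm_mem_reg *)0);
}
```

Nothing changes at runtime; the effect is on symbol visibility. The patch also wraps a few helpers that currently have no callers (for example the __ttm_read_lock family in ttm_lock.c) in #ifdef notyet instead of leaving non-static stubs.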
Diffstat (limited to 'sys/dev')
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_agp_backend.c |  24
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_bo.c          | 217
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_bo_util.c     |  10
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_lock.c        |  52
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_memory.c      |  86
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_page_alloc.c  |  70
-rw-r--r-- | sys/dev/pci/drm/ttm/ttm_tt.c          |  51
7 files changed, 156 insertions, 354 deletions
diff --git a/sys/dev/pci/drm/ttm/ttm_agp_backend.c b/sys/dev/pci/drm/ttm/ttm_agp_backend.c index 121e7aadb12..5f6f4a8c485 100644 --- a/sys/dev/pci/drm/ttm/ttm_agp_backend.c +++ b/sys/dev/pci/drm/ttm/ttm_agp_backend.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_agp_backend.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */ +/* $OpenBSD: ttm_agp_backend.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA @@ -45,12 +45,7 @@ struct ttm_agp_backend { struct drm_agp_head *agp; }; -int ttm_agp_bind(struct ttm_tt *, struct ttm_mem_reg *); -int ttm_agp_unbind(struct ttm_tt *); -void ttm_agp_destroy(struct ttm_tt *); - -int -ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct drm_mm_node *node = bo_mem->mm_node; @@ -77,8 +72,7 @@ ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) return 0; } -int -ttm_agp_unbind(struct ttm_tt *ttm) +static int ttm_agp_unbind(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct agp_softc *sc = agp_be->agp->agpdev; @@ -98,8 +92,7 @@ ttm_agp_unbind(struct ttm_tt *ttm) return 0; } -void -ttm_agp_destroy(struct ttm_tt *ttm) +static void ttm_agp_destroy(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -115,8 +108,7 @@ static struct ttm_backend_func ttm_agp_func = { .destroy = ttm_agp_destroy, }; -struct ttm_tt * -ttm_agp_tt_create(struct ttm_bo_device *bdev, +struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, struct drm_agp_head *agp, unsigned long size, uint32_t page_flags, struct vm_page *dummy_read_page) @@ -139,8 +131,7 @@ ttm_agp_tt_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_agp_tt_create); -int -ttm_agp_tt_populate(struct ttm_tt *ttm) +int ttm_agp_tt_populate(struct ttm_tt *ttm) { if (ttm->state != tt_unpopulated) return 0; @@ -149,8 +140,7 @@ ttm_agp_tt_populate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_agp_tt_populate); -void -ttm_agp_tt_unpopulate(struct ttm_tt *ttm) +void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) { ttm_pool_unpopulate(ttm); } diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c index e49187823d3..97b1b6351f2 100644 --- a/sys/dev/pci/drm/ttm/ttm_bo.c +++ b/sys/dev/pci/drm/ttm/ttm_bo.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_bo.c,v 1.4 2013/10/30 02:11:33 dlg Exp $ */ +/* $OpenBSD: ttm_bo.c,v 1.5 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA @@ -40,40 +40,14 @@ #define TTM_DEBUG(fmt, arg...) 
#define TTM_BO_HASH_ORDER 13 -int ttm_bo_setup_vm(struct ttm_buffer_object *bo); -int ttm_bo_swapout(struct ttm_mem_shrink *shrink); - -void ttm_bo_global_kobj_release(struct ttm_bo_global *glob); - -int ttm_bo_move_buffer(struct ttm_buffer_object *, - struct ttm_placement *, bool, bool); -int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *, - bool, bool); -int ttm_bo_evict(struct ttm_buffer_object *, bool, bool); -void ttm_bo_vm_insert_rb(struct ttm_buffer_object *); -void ttm_mem_type_debug(struct ttm_bo_device *, int); -void ttm_bo_mem_space_debug(struct ttm_buffer_object *, - struct ttm_placement *); -void ttm_bo_release_list(struct ttm_buffer_object *); -int ttm_bo_add_ttm(struct ttm_buffer_object *, bool); -int ttm_bo_handle_move_mem(struct ttm_buffer_object *, - struct ttm_mem_reg *, bool, bool, bool); -void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *); -void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *); -int ttm_bo_delayed_delete(struct ttm_bo_device *, bool); -void ttm_bo_release(struct ttm_buffer_object *); -int ttm_mem_evict_first(struct ttm_bo_device *, uint32_t, bool, bool); -int ttm_bo_mem_force_space(struct ttm_buffer_object *, uint32_t, - struct ttm_placement *, struct ttm_mem_reg *, bool, bool); -uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *, uint32_t, - uint32_t); -bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *, uint32_t, - uint32_t, uint32_t *); -int ttm_bo_mem_compat(struct ttm_placement *, struct ttm_mem_reg *); -int ttm_bo_force_list_clean(struct ttm_bo_device *, unsigned, bool); -void ttm_bo_delayed_tick(void *); +static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); +static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); +static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob); void ttm_bo_delayed_workqueue(void *, void *); +int ttm_bo_move_buffer(struct ttm_buffer_object *, struct ttm_placement *, + bool, bool); + #ifdef notyet static struct attribute ttm_bo_count = { .name = "bo_count", @@ -81,8 +55,7 @@ static struct attribute ttm_bo_count = { }; #endif -static inline int -ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) +static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) { int i; @@ -94,8 +67,7 @@ ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) return -EINVAL; } -void -ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) +static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; @@ -110,8 +82,7 @@ ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) (*man->func->debug)(man, TTM_PFX); } -void -ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, +static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { int i, ret, mem_type; @@ -131,8 +102,7 @@ ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, } #ifdef notyet -ssize_t -ttm_bo_global_show(struct kobject *kobj, +static ssize_t ttm_bo_global_show(struct kobject *kobj, struct attribute *attr, char *buffer) { @@ -160,14 +130,12 @@ static struct kobj_type ttm_bo_glob_kobj_type = { #endif -static inline uint32_t -ttm_bo_type_flags(unsigned type) +static inline uint32_t ttm_bo_type_flags(unsigned type) { return 1 << (type); } -void -ttm_bo_release_list(struct ttm_buffer_object *bo) +static void ttm_bo_release_list(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; size_t acc_size = bo->acc_size; @@ -191,8 +159,7 @@ ttm_bo_release_list(struct 
ttm_buffer_object *bo) ttm_mem_global_free(bdev->glob->mem_glob, acc_size); } -int -ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) { int ret = 0; @@ -208,8 +175,7 @@ ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) } EXPORT_SYMBOL(ttm_bo_wait_unreserved); -void -ttm_bo_add_to_lru(struct ttm_buffer_object *bo) +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; @@ -231,8 +197,7 @@ ttm_bo_add_to_lru(struct ttm_buffer_object *bo) } } -int -ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { int put_count = 0; @@ -253,8 +218,7 @@ ttm_bo_del_from_lru(struct ttm_buffer_object *bo) return put_count; } -int -ttm_bo_reserve_locked(struct ttm_buffer_object *bo, +int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { @@ -310,8 +274,7 @@ ttm_bo_reserve_locked(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_reserve); -void -ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, +void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, bool never_free) { u_int old; @@ -324,8 +287,7 @@ ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, } } -int -ttm_bo_reserve(struct ttm_buffer_object *bo, +int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { @@ -345,16 +307,14 @@ ttm_bo_reserve(struct ttm_buffer_object *bo, return ret; } -void -ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) { ttm_bo_add_to_lru(bo); atomic_set(&bo->reserved, 0); wakeup(&bo->event_queue); } -void -ttm_bo_unreserve(struct ttm_buffer_object *bo) +void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; @@ -367,8 +327,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve); /* * Call bo->rwlock locked. */ -int -ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) +static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; @@ -412,8 +371,7 @@ ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) return ret; } -int -ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, +static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, bool interruptible, bool no_wait_gpu) @@ -526,8 +484,7 @@ out_err: * Will release the bo::reserved lock. */ -void -ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) +static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, NULL); @@ -552,8 +509,7 @@ ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) smp_mb__before_atomic_dec(); } -void -ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) +static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; @@ -611,8 +567,7 @@ ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 
*/ -int -ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, +static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { @@ -698,8 +653,7 @@ ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * encountered buffers. */ -int -ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) +static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; struct ttm_buffer_object *entry = NULL; @@ -749,8 +703,7 @@ out: return ret; } -void -ttm_bo_delayed_tick(void *arg) +static void ttm_bo_delayed_tick(void *arg) { struct ttm_bo_device *bdev = arg; @@ -768,8 +721,7 @@ ttm_bo_delayed_workqueue(void *arg1, void *arg2) } } -void -ttm_bo_release(struct ttm_buffer_object *bo) +static void ttm_bo_release(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; @@ -790,8 +742,7 @@ ttm_bo_release(struct ttm_buffer_object *bo) ttm_bo_release_list(bo); } -void -ttm_bo_unref(struct ttm_buffer_object **p_bo) +void ttm_bo_unref(struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo = *p_bo; @@ -801,8 +752,7 @@ ttm_bo_unref(struct ttm_buffer_object **p_bo) } EXPORT_SYMBOL(ttm_bo_unref); -int -ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) { timeout_del(&bdev->to); task_del(systq, &bdev->task); @@ -810,8 +760,7 @@ ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) } EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); -void -ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) { if (resched) timeout_add(&bdev->to, @@ -819,8 +768,7 @@ ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) } EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); -int -ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, +static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; @@ -875,8 +823,7 @@ out: return ret; } -int -ttm_mem_evict_first(struct ttm_bo_device *bdev, +static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, bool interruptible, bool no_wait_gpu) @@ -923,8 +870,7 @@ ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } -void -ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; @@ -937,8 +883,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. 
*/ -int -ttm_bo_mem_force_space(struct ttm_buffer_object *bo, +static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, struct ttm_placement *placement, struct ttm_mem_reg *mem, @@ -966,8 +911,7 @@ ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return 0; } -uint32_t -ttm_bo_select_caching(struct ttm_mem_type_manager *man, +static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { @@ -992,8 +936,7 @@ ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } -bool -ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, +static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, uint32_t mem_type, uint32_t proposed_placement, uint32_t *masked_placement) @@ -1020,8 +963,7 @@ ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, * ttm_bo_mem_force_space is attempted in priority order to evict and find * space. */ -int -ttm_bo_mem_space(struct ttm_buffer_object *bo, +int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, @@ -1128,8 +1070,7 @@ ttm_bo_mem_space(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_mem_space); -int -ttm_bo_move_buffer(struct ttm_buffer_object *bo, +int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_gpu) @@ -1170,8 +1111,7 @@ out_unlock: return ret; } -int -ttm_bo_mem_compat(struct ttm_placement *placement, +static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; @@ -1191,8 +1131,7 @@ ttm_bo_mem_compat(struct ttm_placement *placement, return -1; } -int -ttm_bo_validate(struct ttm_buffer_object *bo, +int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_gpu) @@ -1234,8 +1173,7 @@ ttm_bo_validate(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_validate); -int -ttm_bo_check_placement(struct ttm_buffer_object *bo, +int ttm_bo_check_placement(struct ttm_buffer_object *bo, struct ttm_placement *placement) { BUG_ON((placement->fpfn || placement->lpfn) && @@ -1244,8 +1182,7 @@ ttm_bo_check_placement(struct ttm_buffer_object *bo, return 0; } -int -ttm_bo_init(struct ttm_bo_device *bdev, +int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, @@ -1344,8 +1281,7 @@ out_err: } EXPORT_SYMBOL(ttm_bo_init); -size_t -ttm_bo_acc_size(struct ttm_bo_device *bdev, +size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size) { @@ -1359,8 +1295,7 @@ ttm_bo_acc_size(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_acc_size); -size_t -ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, +size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size) { @@ -1375,8 +1310,7 @@ ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_dma_acc_size); -int -ttm_bo_create(struct ttm_bo_device *bdev, +int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, @@ -1404,8 +1338,7 @@ ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int -ttm_bo_force_list_clean(struct ttm_bo_device *bdev, +static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, unsigned mem_type, bool allow_errors) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; @@ -1433,8 +1366,7 @@ ttm_bo_force_list_clean(struct 
ttm_bo_device *bdev, return 0; } -int -ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) +int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_mem_type_manager *man; int ret = -EINVAL; @@ -1465,8 +1397,7 @@ ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_clean_mm); -int -ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; @@ -1484,8 +1415,7 @@ ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_evict_mm); -int -ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, unsigned long p_size) { int ret = -EINVAL; @@ -1520,8 +1450,7 @@ ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, } EXPORT_SYMBOL(ttm_bo_init_mm); -void -ttm_bo_global_kobj_release(struct ttm_bo_global *glob) +static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob) { ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); @@ -1529,8 +1458,7 @@ ttm_bo_global_kobj_release(struct ttm_bo_global *glob) drm_free(glob); } -void -ttm_bo_global_release(struct drm_global_reference *ref) +void ttm_bo_global_release(struct drm_global_reference *ref) { struct ttm_bo_global *glob = ref->object; @@ -1539,8 +1467,7 @@ ttm_bo_global_release(struct drm_global_reference *ref) } EXPORT_SYMBOL(ttm_bo_global_release); -int -ttm_bo_global_init(struct drm_global_reference *ref) +int ttm_bo_global_init(struct drm_global_reference *ref) { struct ttm_bo_global_ref *bo_ref = container_of(ref, struct ttm_bo_global_ref, ref); @@ -1582,8 +1509,7 @@ out_no_drp: EXPORT_SYMBOL(ttm_bo_global_init); -int -ttm_bo_device_release(struct ttm_bo_device *bdev) +int ttm_bo_device_release(struct ttm_bo_device *bdev) { int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; @@ -1630,8 +1556,7 @@ ttm_bo_device_release(struct ttm_bo_device *bdev) } EXPORT_SYMBOL(ttm_bo_device_release); -int -ttm_bo_device_init(struct ttm_bo_device *bdev, +int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, struct ttm_bo_driver *driver, uint64_t file_page_offset, @@ -1681,8 +1606,7 @@ EXPORT_SYMBOL(ttm_bo_device_init); * buffer object vm functions. 
*/ -bool -ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; @@ -1699,8 +1623,7 @@ ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return true; } -void -ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { struct ttm_tt *ttm = bo->ttm; struct vm_page *page; @@ -1729,8 +1652,7 @@ ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) ttm_mem_io_free_vm(bo); } -void -ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; @@ -1743,8 +1665,7 @@ ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unmap_virtual); -void -ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) +static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; @@ -1763,8 +1684,7 @@ ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) * placed in the drm device address space. */ -int -ttm_bo_setup_vm(struct ttm_buffer_object *bo) +static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; int ret; @@ -1801,8 +1721,7 @@ out_unlock: return ret; } -int -ttm_bo_wait(struct ttm_buffer_object *bo, +int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; @@ -1857,8 +1776,7 @@ ttm_bo_wait(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_wait); -int -ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { struct ttm_bo_device *bdev = bo->bdev; int ret = 0; @@ -1880,8 +1798,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) } EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); -void -ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) { atomic_dec(&bo->cpu_writers); } @@ -1892,8 +1809,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release); * buffer object on the bo_global::swap_lru list. 
*/ -int -ttm_bo_swapout(struct ttm_mem_shrink *shrink) +static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) { struct ttm_bo_global *glob = container_of(shrink, struct ttm_bo_global, shrink); @@ -1979,8 +1895,7 @@ out: return ret; } -void -ttm_bo_swapout_all(struct ttm_bo_device *bdev) +void ttm_bo_swapout_all(struct ttm_bo_device *bdev) { while (ttm_bo_swapout(&bdev->glob->shrink) == 0) ; diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c index 622a232c9e8..43b81910989 100644 --- a/sys/dev/pci/drm/ttm/ttm_bo_util.c +++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_bo_util.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */ +/* $OpenBSD: ttm_bo_util.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA @@ -37,8 +37,6 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *, void **); void ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *, void *); -int ttm_copy_io_page(void *, void *, unsigned long); -void ttm_transfered_destroy(struct ttm_buffer_object *); void *kmap(struct vm_page *); void kunmap(void *addr); @@ -245,8 +243,7 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, ttm_mem_io_unlock(man); } -int -ttm_copy_io_page(void *dst, void *src, unsigned long page) +static int ttm_copy_io_page(void *dst, void *src, unsigned long page) { uint32_t *dstP = (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); @@ -410,8 +407,7 @@ out: } EXPORT_SYMBOL(ttm_bo_move_memcpy); -void -ttm_transfered_destroy(struct ttm_buffer_object *bo) +static void ttm_transfered_destroy(struct ttm_buffer_object *bo) { free(bo, M_DRM); } diff --git a/sys/dev/pci/drm/ttm/ttm_lock.c b/sys/dev/pci/drm/ttm/ttm_lock.c index 7628e5a7a9f..c2f78a8780b 100644 --- a/sys/dev/pci/drm/ttm/ttm_lock.c +++ b/sys/dev/pci/drm/ttm/ttm_lock.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_lock.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */ +/* $OpenBSD: ttm_lock.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA @@ -40,12 +40,6 @@ #define TTM_SUSPEND_LOCK (1 << 4) void ttm_write_lock_downgrade(struct ttm_lock *); -bool __ttm_read_lock(struct ttm_lock *); -bool __ttm_read_trylock(struct ttm_lock *, bool *); -bool __ttm_write_lock(struct ttm_lock *); -void ttm_vt_lock_remove(struct ttm_base_object **); -bool __ttm_vt_lock(struct ttm_lock *); -bool __ttm_suspend_lock(struct ttm_lock *); void ttm_lock_init(struct ttm_lock *lock) { @@ -69,12 +63,9 @@ void ttm_read_unlock(struct ttm_lock *lock) } EXPORT_SYMBOL(ttm_read_unlock); -bool -__ttm_read_lock(struct ttm_lock *lock) -{ - printf("%s stub\n", __func__); - return false; #ifdef notyet +static bool __ttm_read_lock(struct ttm_lock *lock) +{ bool locked = false; mtx_enter(&lock->lock); @@ -89,8 +80,8 @@ __ttm_read_lock(struct ttm_lock *lock) } mtx_leave(&lock->lock); return locked; -#endif } +#endif int ttm_read_lock(struct ttm_lock *lock, bool interruptible) { @@ -109,12 +100,9 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible) } EXPORT_SYMBOL(ttm_read_lock); -bool -__ttm_read_trylock(struct ttm_lock *lock, bool *locked) -{ - printf("%s stub\n", __func__); - return -ENOSYS; #ifdef notyet +static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) +{ bool block = true; *locked = false; @@ -135,8 +123,8 @@ 
__ttm_read_trylock(struct ttm_lock *lock, bool *locked) mtx_leave(&lock->lock); return !block; -#endif } +#endif int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) { @@ -170,12 +158,9 @@ void ttm_write_unlock(struct ttm_lock *lock) } EXPORT_SYMBOL(ttm_write_unlock); -bool -__ttm_write_lock(struct ttm_lock *lock) -{ - printf("%s stub\n", __func__); - return false; #ifdef notyet +static bool __ttm_write_lock(struct ttm_lock *lock) +{ bool locked = false; mtx_enter(&lock->lock); @@ -193,8 +178,8 @@ __ttm_write_lock(struct ttm_lock *lock) } mtx_leave(&lock->lock); return locked; -#endif } +#endif int ttm_write_lock(struct ttm_lock *lock, bool interruptible) { @@ -228,6 +213,7 @@ void ttm_write_lock_downgrade(struct ttm_lock *lock) mtx_leave(&lock->lock); } +#ifdef notyet static int __ttm_vt_unlock(struct ttm_lock *lock) { int ret = 0; @@ -241,9 +227,10 @@ static int __ttm_vt_unlock(struct ttm_lock *lock) return ret; } +#endif -void -ttm_vt_lock_remove(struct ttm_base_object **p_base) +#ifdef notyet +static void ttm_vt_lock_remove(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct ttm_lock *lock = container_of(base, struct ttm_lock, base); @@ -253,9 +240,10 @@ ttm_vt_lock_remove(struct ttm_base_object **p_base) ret = __ttm_vt_unlock(lock); BUG_ON(ret != 0); } +#endif -bool -__ttm_vt_lock(struct ttm_lock *lock) +#ifdef notyet +static bool __ttm_vt_lock(struct ttm_lock *lock) { bool locked = false; @@ -270,6 +258,7 @@ __ttm_vt_lock(struct ttm_lock *lock) mtx_leave(&lock->lock); return locked; } +#endif int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, @@ -327,8 +316,8 @@ void ttm_suspend_unlock(struct ttm_lock *lock) } EXPORT_SYMBOL(ttm_suspend_unlock); -bool -__ttm_suspend_lock(struct ttm_lock *lock) +#ifdef notyet +static bool __ttm_suspend_lock(struct ttm_lock *lock) { bool locked = false; @@ -343,6 +332,7 @@ __ttm_suspend_lock(struct ttm_lock *lock) mtx_leave(&lock->lock); return locked; } +#endif void ttm_suspend_lock(struct ttm_lock *lock) { diff --git a/sys/dev/pci/drm/ttm/ttm_memory.c b/sys/dev/pci/drm/ttm/ttm_memory.c index 1e1743f9fca..0aaf9a21ea1 100644 --- a/sys/dev/pci/drm/ttm/ttm_memory.c +++ b/sys/dev/pci/drm/ttm/ttm_memory.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_memory.c,v 1.2 2013/10/29 06:30:57 jsg Exp $ */ +/* $OpenBSD: ttm_memory.c,v 1.3 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA @@ -47,22 +47,6 @@ struct ttm_mem_zone { uint64_t used_mem; }; -void ttm_mem_zone_kobj_release(struct ttm_mem_zone *); -void ttm_mem_global_kobj_release(struct ttm_mem_global *); -bool ttm_zones_above_swap_target(struct ttm_mem_global *, - bool, uint64_t); -void ttm_shrink(struct ttm_mem_global *, bool, uint64_t); -int ttm_mem_init_kernel_zone(struct ttm_mem_global *, uint64_t); -int ttm_mem_init_dma32_zone(struct ttm_mem_global *, uint64_t); -void ttm_check_swapping(struct ttm_mem_global *); -void ttm_mem_global_free_zone(struct ttm_mem_global *, - struct ttm_mem_zone *, uint64_t); -int ttm_mem_global_reserve(struct ttm_mem_global *, - struct ttm_mem_zone *, uint64_t, bool); -int ttm_mem_global_alloc_zone(struct ttm_mem_global *, - struct ttm_mem_zone *, uint64_t, bool, bool); -void ttm_shrink_work(void *, void *); - #ifdef notyet static struct attribute ttm_mem_sys = { .name = "zone_memory", @@ -86,8 +70,7 @@ static struct attribute ttm_mem_used = { }; #endif -void -ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone) +static 
void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone) { DRM_INFO("Zone %7s: Used memory at exit: %llu kiB\n", @@ -96,8 +79,7 @@ ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone) } #ifdef notyet -ssize_t -ttm_mem_zone_show(struct kobject *kobj, +static ssize_t ttm_mem_zone_show(struct kobject *kobj, struct attribute *attr, char *buffer) { @@ -121,11 +103,10 @@ ttm_mem_zone_show(struct kobject *kobj, return snprintf(buffer, PAGE_SIZE, "%llu\n", (unsigned long long) val >> 10); } -#endif -#ifdef notyet -ssize_t -ttm_mem_zone_store(struct kobject *kobj, +static void ttm_check_swapping(struct ttm_mem_global *glob); + +static ssize_t ttm_mem_zone_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t size) @@ -186,8 +167,7 @@ static struct kobj_type ttm_mem_zone_kobj_type = { }; #endif -void -ttm_mem_global_kobj_release(struct ttm_mem_global *glob) +static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob) { free(glob, M_DRM); @@ -199,8 +179,7 @@ static struct kobj_type ttm_mem_glob_kobj_type = { }; #endif -bool -ttm_zones_above_swap_target(struct ttm_mem_global *glob, +static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, bool from_wq, uint64_t extra) { unsigned int i; @@ -232,8 +211,7 @@ ttm_zones_above_swap_target(struct ttm_mem_global *glob, * many threads may try to swap out at any given time. */ -void -ttm_shrink(struct ttm_mem_global *glob, bool from_wq, +static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, uint64_t extra) { int ret; @@ -256,16 +234,14 @@ out: mtx_leave(&glob->lock); } -void -ttm_shrink_work(void *arg1, void *arg2) +static void ttm_shrink_work(void *arg1, void *arg2) { struct ttm_mem_global *glob = arg1; ttm_shrink(glob, true, 0ULL); } -int -ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, +static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, uint64_t mem) { struct ttm_mem_zone *zone = malloc(sizeof(*zone), M_DRM, M_WAITOK | M_ZERO); @@ -287,8 +263,7 @@ ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, } #ifdef CONFIG_HIGHMEM -int -ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, +static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, uint64_t mem) { struct ttm_mem_zone *zone; @@ -313,8 +288,7 @@ ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, return 0; } #else -int -ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, +static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, uint64_t mem) { struct ttm_mem_zone *zone = malloc(sizeof(*zone), M_DRM, M_WAITOK | M_ZERO); @@ -352,8 +326,7 @@ ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, } #endif -int -ttm_mem_global_init(struct ttm_mem_global *glob) +int ttm_mem_global_init(struct ttm_mem_global *glob) { uint64_t mem; int ret; @@ -399,8 +372,7 @@ out_no_zone: } EXPORT_SYMBOL(ttm_mem_global_init); -void -ttm_mem_global_release(struct ttm_mem_global *glob) +void ttm_mem_global_release(struct ttm_mem_global *glob) { unsigned int i; struct ttm_mem_zone *zone; @@ -421,8 +393,7 @@ ttm_mem_global_release(struct ttm_mem_global *glob) } EXPORT_SYMBOL(ttm_mem_global_release); -void -ttm_check_swapping(struct ttm_mem_global *glob) +static void ttm_check_swapping(struct ttm_mem_global *glob) { bool needs_swapping = false; unsigned int i; @@ -447,8 +418,7 @@ ttm_check_swapping(struct ttm_mem_global *glob) task_add(glob->swap_queue, &glob->task); } -void -ttm_mem_global_free_zone(struct ttm_mem_global *glob, +static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, 
uint64_t amount) { @@ -465,16 +435,14 @@ ttm_mem_global_free_zone(struct ttm_mem_global *glob, mtx_leave(&glob->lock); } -void -ttm_mem_global_free(struct ttm_mem_global *glob, +void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount) { return ttm_mem_global_free_zone(glob, NULL, amount); } EXPORT_SYMBOL(ttm_mem_global_free); -int -ttm_mem_global_reserve(struct ttm_mem_global *glob, +static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, uint64_t amount, bool reserve) { @@ -514,8 +482,7 @@ out_unlock: } -int -ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, +static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, uint64_t memory, bool no_wait, bool interruptible) @@ -536,8 +503,7 @@ ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, return 0; } -int -ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, bool no_wait, bool interruptible) { /** @@ -552,8 +518,7 @@ EXPORT_SYMBOL(ttm_mem_global_alloc); #define page_to_pfn(pp) (VM_PAGE_TO_PHYS(pp) / PAGE_SIZE) -int -ttm_mem_global_alloc_page(struct ttm_mem_global *glob, +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, struct vm_page *page, bool no_wait, bool interruptible) { @@ -576,8 +541,7 @@ ttm_mem_global_alloc_page(struct ttm_mem_global *glob, interruptible); } -void -ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page) +void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page) { struct ttm_mem_zone *zone = NULL; @@ -591,8 +555,8 @@ ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page) ttm_mem_global_free_zone(glob, zone, PAGE_SIZE); } -size_t -ttm_round_pot(size_t size) + +size_t ttm_round_pot(size_t size) { if ((size & (size - 1)) == 0) return size; diff --git a/sys/dev/pci/drm/ttm/ttm_page_alloc.c b/sys/dev/pci/drm/ttm/ttm_page_alloc.c index e240701a547..ef28a9f130c 100644 --- a/sys/dev/pci/drm/ttm/ttm_page_alloc.c +++ b/sys/dev/pci/drm/ttm/ttm_page_alloc.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_page_alloc.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */ +/* $OpenBSD: ttm_page_alloc.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */ /* * Copyright (c) Red Hat Inc. 
@@ -140,27 +140,6 @@ static struct attribute *ttm_pool_attrs[] = { }; #endif -int set_pages_array_wb(struct vm_page **, int); -struct ttm_page_pool * - ttm_get_pool(int, enum ttm_caching_state); -void ttm_pages_put(struct vm_page *[], unsigned); -void ttm_pool_update_free_locked(struct ttm_page_pool *, unsigned); -int ttm_page_pool_free(struct ttm_page_pool *, unsigned); -int ttm_pool_get_num_unused_pages(void); -void ttm_pool_mm_shrink_init(struct ttm_pool_manager *); -void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *); -int ttm_set_pages_caching(struct vm_page **, enum ttm_caching_state, - unsigned); -void ttm_handle_caching_state_failure(struct pglist *, int, - enum ttm_caching_state, struct vm_page **, unsigned); -void ttm_page_pool_fill_locked(struct ttm_page_pool *, - int, enum ttm_caching_state, unsigned); -unsigned ttm_page_pool_get_pages(struct ttm_page_pool *, struct pglist *, - int, enum ttm_caching_state, unsigned); -void ttm_put_pages(struct vm_page **, unsigned, int, enum ttm_caching_state); -int ttm_get_pages(struct vm_page **, unsigned, int, enum ttm_caching_state); -void ttm_page_pool_init_locked(struct ttm_page_pool *, int, char *); - struct vm_page *ttm_uvm_alloc_page(void); void ttm_uvm_free_page(struct vm_page *); @@ -264,8 +243,7 @@ static struct kobj_type ttm_pool_kobj_type = { static struct ttm_pool_manager *_manager; -int -set_pages_array_wb(struct vm_page **pages, int addrinarray) +static int set_pages_array_wb(struct vm_page **pages, int addrinarray) { #ifdef TTM_HAS_AGP #if defined(__amd64__) || defined(__i386__) @@ -314,8 +292,7 @@ static int set_pages_array_uc(struct vm_page **pages, int addrinarray) /** * Select the right pool or requested caching state and ttm flags. */ -struct ttm_page_pool * -ttm_get_pool(int flags, +static struct ttm_page_pool *ttm_get_pool(int flags, enum ttm_caching_state cstate) { int pool_index; @@ -335,8 +312,7 @@ ttm_get_pool(int flags, } /* set memory back to wb and free the pages. */ -void -ttm_pages_put(struct vm_page *pages[], unsigned npages) +static void ttm_pages_put(struct vm_page *pages[], unsigned npages) { unsigned i; if (set_pages_array_wb(pages, npages)) @@ -345,8 +321,7 @@ ttm_pages_put(struct vm_page *pages[], unsigned npages) ttm_uvm_free_page(pages[i]); } -void -ttm_pool_update_free_locked(struct ttm_page_pool *pool, +static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, unsigned freed_pages) { pool->npages -= freed_pages; @@ -362,8 +337,7 @@ ttm_pool_update_free_locked(struct ttm_page_pool *pool, * @pool: to free the pages from * @free_all: If set to true will free all pages in pool **/ -int -ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) +static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) { struct vm_page *p, *p1; struct vm_page **pages_to_free; @@ -444,9 +418,9 @@ out: return nr_free; } +#ifdef notyet /* Get good estimation how many pages are free in pools */ -int -ttm_pool_get_num_unused_pages(void) +static int ttm_pool_get_num_unused_pages(void) { unsigned i; int total = 0; @@ -455,6 +429,7 @@ ttm_pool_get_num_unused_pages(void) return total; } +#endif /** * Callback for mm to request pool to reduce number of page held. 
@@ -483,8 +458,7 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink, } #endif -void -ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) +static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) { printf("%s stub\n", __func__); #ifdef notyet @@ -494,8 +468,7 @@ ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) #endif } -void -ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) +static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) { printf("%s stub\n", __func__); #ifdef notyet @@ -503,8 +476,7 @@ ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) #endif } -int -ttm_set_pages_caching(struct vm_page **pages, +static int ttm_set_pages_caching(struct vm_page **pages, enum ttm_caching_state cstate, unsigned cpages) { int r = 0; @@ -531,8 +503,7 @@ ttm_set_pages_caching(struct vm_page **pages, * any pages that have changed their caching state already put them to the * pool. */ -void -ttm_handle_caching_state_failure(struct pglist *pages, +static void ttm_handle_caching_state_failure(struct pglist *pages, int ttm_flags, enum ttm_caching_state cstate, struct vm_page **failed_pages, unsigned cpages) { @@ -630,8 +601,7 @@ out: * Fill the given pool if there aren't enough pages and the requested number of * pages is small. */ -void -ttm_page_pool_fill_locked(struct ttm_page_pool *pool, +static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { struct vm_page *p; @@ -688,8 +658,7 @@ ttm_page_pool_fill_locked(struct ttm_page_pool *pool, * * @return count of pages still required to fulfill the request. */ -unsigned -ttm_page_pool_get_pages(struct ttm_page_pool *pool, +static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct pglist *pages, int ttm_flags, enum ttm_caching_state cstate, @@ -721,8 +690,7 @@ out: } /* Put all pages in pages list to correct pool to wait for reuse */ -void -ttm_put_pages(struct vm_page **pages, unsigned npages, int flags, +static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); @@ -765,8 +733,7 @@ ttm_put_pages(struct vm_page **pages, unsigned npages, int flags, * On success pages list will hold count number of correctly * cached pages. 
*/ -int -ttm_get_pages(struct vm_page **pages, unsigned npages, int flags, +static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); @@ -845,8 +812,7 @@ ttm_get_pages(struct vm_page **pages, unsigned npages, int flags, return 0; } -void -ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, +static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, char *name) { mtx_init(&pool->lock, IPL_TTY); diff --git a/sys/dev/pci/drm/ttm/ttm_tt.c b/sys/dev/pci/drm/ttm/ttm_tt.c index 78fb5cf869e..88f99c213f3 100644 --- a/sys/dev/pci/drm/ttm/ttm_tt.c +++ b/sys/dev/pci/drm/ttm/ttm_tt.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ttm_tt.c,v 1.1 2013/08/12 04:11:53 jsg Exp $ */ +/* $OpenBSD: ttm_tt.c,v 1.2 2013/12/08 07:54:06 jsg Exp $ */ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA @@ -39,21 +39,15 @@ #include <dev/pci/drm/ttm/ttm_placement.h> #include <dev/pci/drm/ttm/ttm_page_alloc.h> -void ttm_tt_alloc_page_directory(struct ttm_tt *); -void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *); -int ttm_tt_set_caching(struct ttm_tt *, enum ttm_caching_state); - /** * Allocates storage for pointers to the pages that back the ttm. */ -void -ttm_tt_alloc_page_directory(struct ttm_tt *ttm) +static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) { ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); } -void -ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) +static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) { ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*)); ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages, @@ -61,8 +55,7 @@ ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) } #ifdef CONFIG_X86 -static inline int -ttm_tt_set_page_caching(struct vm_page *p, +static inline int ttm_tt_set_page_caching(struct vm_page *p, enum ttm_caching_state c_old, enum ttm_caching_state c_new) { @@ -88,8 +81,7 @@ ttm_tt_set_page_caching(struct vm_page *p, return ret; } #else /* CONFIG_X86 */ -static inline int -ttm_tt_set_page_caching(struct vm_page *p, +static inline int ttm_tt_set_page_caching(struct vm_page *p, enum ttm_caching_state c_old, enum ttm_caching_state c_new) { @@ -102,8 +94,7 @@ ttm_tt_set_page_caching(struct vm_page *p, * for range of pages in a ttm. 
*/ -int -ttm_tt_set_caching(struct ttm_tt *ttm, +static int ttm_tt_set_caching(struct ttm_tt *ttm, enum ttm_caching_state c_state) { int i, j; @@ -153,8 +144,7 @@ out_err: return ret; } -int -ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) +int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) { enum ttm_caching_state state; @@ -169,8 +159,7 @@ ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) } EXPORT_SYMBOL(ttm_tt_set_placement_caching); -void -ttm_tt_destroy(struct ttm_tt *ttm) +void ttm_tt_destroy(struct ttm_tt *ttm) { if (unlikely(ttm == NULL)) return; @@ -191,8 +180,7 @@ ttm_tt_destroy(struct ttm_tt *ttm) ttm->func->destroy(ttm); } -int -ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct vm_page *dummy_read_page) { @@ -215,16 +203,14 @@ ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_tt_init); -void -ttm_tt_fini(struct ttm_tt *ttm) +void ttm_tt_fini(struct ttm_tt *ttm) { drm_free_large(ttm->pages); ttm->pages = NULL; } EXPORT_SYMBOL(ttm_tt_fini); -int -ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, +int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct vm_page *dummy_read_page) { @@ -250,8 +236,7 @@ ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_dma_tt_init); -void -ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) +void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) { struct ttm_tt *ttm = &ttm_dma->ttm; @@ -262,8 +247,7 @@ ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) } EXPORT_SYMBOL(ttm_dma_tt_fini); -void -ttm_tt_unbind(struct ttm_tt *ttm) +void ttm_tt_unbind(struct ttm_tt *ttm) { int ret; @@ -274,8 +258,7 @@ ttm_tt_unbind(struct ttm_tt *ttm) } } -int -ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { int ret = 0; @@ -299,8 +282,7 @@ ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) } EXPORT_SYMBOL(ttm_tt_bind); -int -ttm_tt_swapin(struct ttm_tt *ttm) +int ttm_tt_swapin(struct ttm_tt *ttm) { struct uvm_object *swap_storage; struct vm_page *from_page; @@ -338,8 +320,7 @@ out_err: return ret; } -int -ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage) +int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage) { struct uvm_object *swap_storage; struct vm_page *from_page; |