Main Page | Data Structures | File List | Data Fields | Globals | Related Pages

dwc_otg_hcd_ddma.c

Go to the documentation of this file.
00001 /*==========================================================================
00002  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
00003  * $Revision: #10 $
00004  * $Date: 2011/10/20 $
00005  * $Change: 1869464 $
00006  *
00007  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
00008  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
00009  * otherwise expressly agreed to in writing between Synopsys and you.
00010  *
00011  * The Software IS NOT an item of Licensed Software or Licensed Product under
00012  * any End User Software License Agreement or Agreement for Licensed Product
00013  * with Synopsys or any supplement thereto. You are permitted to use and
00014  * redistribute this Software in source and binary forms, with or without
00015  * modification, provided that redistributions of source code must retain this
00016  * notice. You may not view, use, disclose, copy or distribute this file or
00017  * any information contained herein except pursuant to this license grant from
00018  * Synopsys. If you do not agree with this notice, including the disclaimer
00019  * below, then you are not authorized to use the Software.
00020  *
00021  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
00022  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00023  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
00024  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
00025  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
00026  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
00027  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
00028  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
00029  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
00030  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
00031  * DAMAGE.
00032  * ========================================================================== */
00033 #ifndef DWC_DEVICE_ONLY
00034 
00039 #include "dwc_otg_hcd.h"
00040 #include "dwc_otg_regs.h"
00041 
00042 static inline uint8_t frame_list_idx(uint16_t frame)
00043 {
00044         return (frame & (MAX_FRLIST_EN_NUM - 1));
00045 }
00046 
00047 static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
00048 {
00049         return (idx + inc) &
00050             (((speed ==
00051                DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
00052               MAX_DMA_DESC_NUM_GENERIC) - 1);
00053 }
00054 
00055 static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
00056 {
00057         return (idx - inc) &
00058             (((speed ==
00059                DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
00060               MAX_DMA_DESC_NUM_GENERIC) - 1);
00061 }
00062 
00063 static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
00064 {
00065         return (((qh->ep_type == UE_ISOCHRONOUS)
00066                  && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
00067                 ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
00068 }
00069 static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
00070 {
00071         return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
00072                 ? ((qh->interval + 8 - 1) / 8)
00073                 : qh->interval);
00074 }
00075 
00076 static int desc_list_alloc(dwc_otg_qh_t * qh)
00077 {
00078         int retval = 0;
00079 
00080         qh->desc_list = (dwc_otg_host_dma_desc_t *)
00081             DWC_DMA_ALLOC(sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
00082                           &qh->desc_list_dma);
00083 
00084         if (!qh->desc_list) {
00085                 retval = -DWC_E_NO_MEMORY;
00086                 DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
00087                 
00088         }
00089 
00090         dwc_memset(qh->desc_list, 0x00,
00091                    sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
00092 
00093         qh->n_bytes =
00094             (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));
00095 
00096         if (!qh->n_bytes) {
00097                 retval = -DWC_E_NO_MEMORY;
00098                 DWC_ERROR
00099                     ("%s: Failed to allocate array for descriptors' size actual values\n",
00100                      __func__);
00101 
00102         }
00103         return retval;
00104 
00105 }
00106 
00107 static void desc_list_free(dwc_otg_qh_t * qh)
00108 {
00109         if (qh->desc_list) {
00110                 DWC_DMA_FREE(max_desc_num(qh), qh->desc_list,
00111                              qh->desc_list_dma);
00112                 qh->desc_list = NULL;
00113         }
00114 
00115         if (qh->n_bytes) {
00116                 DWC_FREE(qh->n_bytes);
00117                 qh->n_bytes = NULL;
00118         }
00119 }
00120 
00121 static int frame_list_alloc(dwc_otg_hcd_t * hcd)
00122 {
00123         int retval = 0;
00124         if (hcd->frame_list)
00125                 return 0;
00126 
00127         hcd->frame_list = DWC_DMA_ALLOC(4 * MAX_FRLIST_EN_NUM,
00128                                         &hcd->frame_list_dma);
00129         if (!hcd->frame_list) {
00130                 retval = -DWC_E_NO_MEMORY;
00131                 DWC_ERROR("%s: Frame List allocation failed\n", __func__);
00132         }
00133 
00134         dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);
00135 
00136         return retval;
00137 }
00138 
00139 static void frame_list_free(dwc_otg_hcd_t * hcd)
00140 {
00141         if (!hcd->frame_list)
00142                 return;
00143         
00144         DWC_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
00145         hcd->frame_list = NULL;
00146 }
00147 
00148 static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
00149 {
00150 
00151         hcfg_data_t hcfg;
00152 
00153         hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
00154 
00155         if (hcfg.b.perschedena) {
00156                 /* already enabled */
00157                 return;
00158         }
00159 
00160         DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
00161                         hcd->frame_list_dma);
00162 
00163         switch (fr_list_en) {
00164         case 64:
00165                 hcfg.b.frlisten = 3;
00166                 break;
00167         case 32:
00168                 hcfg.b.frlisten = 2;
00169                 break;
00170         case 16:
00171                 hcfg.b.frlisten = 1;
00172                 break;
00173         case 8:
00174                 hcfg.b.frlisten = 0;
00175                 break;
00176         default:
00177                 break;
00178         }
00179 
00180         hcfg.b.perschedena = 1;
00181 
00182         DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
00183         DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
00184 
00185 }
00186 
00187 static void per_sched_disable(dwc_otg_hcd_t * hcd)
00188 {
00189         hcfg_data_t hcfg;
00190 
00191         hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
00192         
00193         if (!hcfg.b.perschedena) {
00194                 /* already disabled */
00195                 return;
00196         }
00197         hcfg.b.perschedena = 0;
00198 
00199         DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
00200         DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
00201 }
00202 
00203 /* 
00204  * Activates/Deactivates FrameList entries for the channel 
00205  * based on endpoint servicing period.
00206  */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
        uint16_t i, j, inc;
        dwc_hc_t *hc = NULL;

        /* A host channel must already be assigned to this QH. */
        if (!qh->channel) {
                DWC_ERROR("qh->channel = %p", qh->channel);
                return;
        }

        if (!hcd) {
                DWC_ERROR("------hcd = %p", hcd);
                return;
        }

        if (!hcd->frame_list) {
                DWC_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
                return;
        }

        hc = qh->channel;
        inc = frame_incr_val(qh);
        /* ISOC entries start at the scheduled frame's slot; INTR uses slot 0. */
        if (qh->ep_type == UE_ISOCHRONOUS)
                i = frame_list_idx(qh->sched_frame);
        else
                i = 0;

        /* Set/clear this channel's bit in every slot of its service period,
         * walking the frame list with wraparound until back at the start. */
        j = i;
        do {
                if (enable)
                        hcd->frame_list[j] |= (1 << hc->hc_num);
                else
                        hcd->frame_list[j] &= ~(1 << hc->hc_num);
                j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
        }
        while (j != i);
        /* The per-channel uframe bitmap only needs rebuilding on enable. */
        if (!enable)
                return;
        hc->schinfo = 0;
        if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
                j = 1;
                /* TODO - check this */
                /* One serviced uframe per 'interval' uframes, rounded up. */
                inc = (8 + qh->interval - 1) / qh->interval;
                for (i = 0; i < inc; i++) {
                        hc->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                /* FS/LS: mark every uframe as serviceable. */
                hc->schinfo = 0xff;
        }
}
00258 
00259 #if 1
00260 void dump_frame_list(dwc_otg_hcd_t * hcd)
00261 {
00262         int i = 0;
00263         DWC_PRINTF("--FRAME LIST (hex) --\n");
00264         for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
00265                 DWC_PRINTF("%x\t", hcd->frame_list[i]);
00266                 if (!(i % 8) && i)
00267                         DWC_PRINTF("\n");
00268         }
00269         DWC_PRINTF("\n----\n");
00270 
00271 }
00272 #endif
00273 
00274 static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
00275 {
00276         dwc_hc_t *hc = qh->channel;
00277         if (dwc_qh_is_non_per(qh))
00278                 hcd->non_periodic_channels--;
00279         else
00280                 update_frame_list(hcd, qh, 0);
00281 
00282         /* 
00283          * The condition is added to prevent double cleanup try in case of device
00284          * disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
00285          */
00286         if (hc->qh) {
00287                 dwc_otg_hc_cleanup(hcd->core_if, hc);
00288                 DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
00289                 hc->qh = NULL;
00290         }
00291 
00292         qh->channel = NULL;
00293         qh->ntd = 0;
00294 
00295         if (qh->desc_list) {
00296                 dwc_memset(qh->desc_list, 0x00,
00297                            sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
00298         }
00299 }
00300 
00312 int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
00313 {
00314         int retval = 0;
00315 
00316         if (qh->do_split) {
00317                 DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
00318                 return -1;
00319         }
00320 
00321         retval = desc_list_alloc(qh);
00322 
00323         if ((retval == 0)
00324             && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
00325                 if (!hcd->frame_list) {
00326                         retval = frame_list_alloc(hcd);
00327                         /* Enable periodic schedule on first periodic QH */
00328                         if (retval == 0)
00329                                 per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
00330                 }
00331         }
00332 
00333         qh->ntd = 0;
00334 
00335         return retval;
00336 }
00337 
00346 void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
00347 {
00348         desc_list_free(qh);
00349 
00350         /* 
00351          * Channel still assigned due to some reasons. 
00352          * Seen on Isoc URB dequeue. Channel halted but no subsequent
00353          * ChHalted interrupt to release the channel. Afterwards
00354          * when it comes here from endpoint disable routine
00355          * channel remains assigned.
00356          */
00357         if (qh->channel)
00358                 release_channel_ddma(hcd, qh);
00359 
00360         if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
00361             && !hcd->periodic_channels && hcd->frame_list) {
00362 
00363                 per_sched_disable(hcd);
00364                 frame_list_free(hcd);
00365         }
00366 }
00367 
00368 static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
00369 {
00370         if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
00371                 /* 
00372                  * Descriptor set(8 descriptors) index
00373                  * which is 8-aligned.
00374                  */
00375                 return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
00376         } else {
00377                 return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
00378         }
00379 }
00380 
00381 /* 
00382  * Determine starting frame for Isochronous transfer. 
00383  * Few frames skipped to prevent race condition with HC. 
00384  */
00385 static uint8_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
00386                                    uint8_t * skip_frames)
00387 {
00388         uint16_t frame = 0;
00389         hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);
00390         
00391         /* sched_frame is always frame number(not uFrame) both in FS and HS !! */
00392         
00393         /* 
00394          * skip_frames is used to limit activated descriptors number
00395          * to avoid the situation when HC services the last activated
00396          * descriptor firstly.
00397          * Example for FS:
00398          * Current frame is 1, scheduled frame is 3. Since HC always fetches the descriptor
00399          * corresponding to curr_frame+1, the descriptor corresponding to frame 2
00400          * will be fetched. If the number of descriptors is max=64 (or greather) the
00401          * list will be fully programmed with Active descriptors and it is possible
00402          * case(rare) that the latest descriptor(considering rollback) corresponding
00403          * to frame 2 will be serviced first. HS case is more probable because, in fact,
00404          * up to 11 uframes(16 in the code) may be skipped.
00405          */
00406         if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
00407                 /* 
00408                  * Consider uframe counter also, to start xfer asap.
00409                  * If half of the frame elapsed skip 2 frames otherwise
00410                  * just 1 frame. 
00411                  * Starting descriptor index must be 8-aligned, so
00412                  * if the current frame is near to complete the next one
00413                  * is skipped as well.
00414                  */
00415 
00416                 if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
00417                         *skip_frames = 2 * 8;
00418                         frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
00419                 } else {
00420                         *skip_frames = 1 * 8;
00421                         frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
00422                 }
00423 
00424                 frame = dwc_full_frame_num(frame);
00425         } else {
00426                 /* 
00427                  * Two frames are skipped for FS - the current and the next.
00428                  * But for descriptor programming, 1 frame(descriptor) is enough,
00429                  * see example above.
00430                  */
00431                 *skip_frames = 1;
00432                 frame = dwc_frame_num_inc(hcd->frame_number, 2);
00433         }
00434 
00435         return frame;
00436 }
00437 
00438 /* 
00439  * Calculate initial descriptor index for isochronous transfer
00440  * based on scheduled frame. 
00441  */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        uint16_t frame = 0, fr_idx, fr_idx_tmp;
        uint8_t skip_frames = 0;
        /* 
         * With current ISOC processing algorithm the channel is being
         * released when no more QTDs in the list(qh->ntd == 0).
         * Thus this function is called only when qh->ntd == 0 and qh->channel == 0. 
         *
         * So qh->channel != NULL branch is not used and just not removed from the
         * source file. It is required for another possible approach which is,
         * do not disable and release the channel when ISOC session completed, 
         * just move QH to inactive schedule until new QTD arrives. 
         * On new QTD, the QH moved back to 'ready' schedule,
         * starting frame and therefore starting desc_index are recalculated.
         * In this case channel is released only on ep_disable.
         */

        /* Calculate starting descriptor index. For INTERRUPT endpoint it is always 0. */
        if (qh->channel) {
                frame = calc_starting_frame(hcd, qh, &skip_frames);
                /* 
                 * Calculate initial descriptor index based on FrameList current bitmap
                 * and servicing period: find the first slot at or after the new
                 * starting frame that lands on the QH's existing service phase
                 * (offset from sched_frame modulo the period), with wraparound.
                 */
                fr_idx_tmp = frame_list_idx(frame);
                fr_idx =
                    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
                     fr_idx_tmp)
                    % frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
        } else {
                /* No channel yet: schedule from scratch at the computed frame. */
                qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
                fr_idx = frame_list_idx(qh->sched_frame);
        }

        /* Start the descriptor ring empty at the slot's descriptor index. */
        qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

        /* Returns the number of (u)frames skipped by calc_starting_frame(). */
        return skip_frames;
}
00482 
00483 #define ISOC_URB_GIVEBACK_ASAP
00484 
00485 #define MAX_ISOC_XFER_SIZE_FS 1023
00486 #define MAX_ISOC_XFER_SIZE_HS 3072
00487 #define DESCNUM_THRESHOLD 4
00488 
/*
 * Program isochronous DMA descriptors for all queued QTDs of a QH,
 * one descriptor per ISOC packet, stepping through the descriptor
 * list by the QH's service interval starting at qh->td_last.
 */
static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                               uint8_t skip_frames)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

        /* Maximum descriptors this QH can have active: one per service
         * interval across the whole descriptor list, rounded up. */
        ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
        /* On first activation (no channel yet), skipped frames reduce
         * the number of slots that may be programmed. */
        if (skip_frames && !qh->channel)
                ntd_max = ntd_max - skip_frames / qh->interval;

        /* Per-descriptor transfer size cap for this bus speed. */
        max_xfer_size =
            (qh->dev_speed ==
             DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
            MAX_ISOC_XFER_SIZE_FS;

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
                /* Program descriptors for each not-yet-programmed ISOC
                 * packet of this QTD while slots remain. */
                while ((qh->ntd < ntd_max)
                       && (qtd->isoc_frame_index_last <
                           qtd->urb->packet_count)) {

                        dma_desc = &qh->desc_list[idx];
                        dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

                        /* Clamp the programmed size to the bus maximum;
                         * remember it for completion-time accounting. */
                        if (frame_desc->length > max_xfer_size)
                                qh->n_bytes[idx] = max_xfer_size;
                        else
                                qh->n_bytes[idx] = frame_desc->length;
                        dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
                        dma_desc->status.b_isoc.a = 1;  /* mark Active */
                        dma_desc->status.b_isoc.sts = 0;

                        dma_desc->buf = qtd->urb->dma + frame_desc->offset;

                        qh->ntd++;

                        qtd->isoc_frame_index_last++;

#ifdef  ISOC_URB_GIVEBACK_ASAP
                        /* 
                         * Set IOC for each descriptor corresponding to the 
                         * last frame of the URB.
                         */
                        if (qtd->isoc_frame_index_last ==
                            qtd->urb->packet_count)
                                dma_desc->status.b_isoc.ioc = 1;

#endif
                        idx = desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;

                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef  ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for the last descriptor if descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status.b_isoc.ioc = 1;
        }
#else
        /* 
         * Set IOC bit only for one descriptor. 
         * Always try to be ahead of HW processing,
         * i.e. on IOC generation driver activates next descriptors but
         * core continues to process descriptors followed the one with IOC set.
         *
         * NOTE(review): this branch calls dwc_desclist_idx_dec(), a name not
         * defined in this file (the local helper is desclist_idx_dec()).
         * It will not build if ISOC_URB_GIVEBACK_ASAP is ever undefined -
         * verify against the rest of the driver.
         */

        if (n_desc > DESCNUM_THRESHOLD) {
                /* 
                 * Move IOC "up". Required even if there is only one QTD 
                 * in the list, cause QTDs might continue to be queued,
                 * but during the activation it was only one queued.
                 * Actually more than one QTD might be in the list if this function called 
                 * from XferCompletion - QTDs was queued during HW processing of the previous
                 * descriptor chunk.
                 */
                idx = dwc_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
        } else {
                /* 
                 * Set the IOC for the latest descriptor
                 * if either number of descriptor is not greater than threshold
                 * or no more new descriptors activated.
                 */
                idx = dwc_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
        }

        qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}
00589 
/*
 * Program control/bulk/interrupt DMA descriptors for the queued QTDs
 * of a QH, splitting each QTD's transfer into MAX_DMA_DESC_SIZE-bounded
 * chunks. Sets IOC and EOL on the last descriptor programmed and stores
 * the descriptor count in hc->ntd.
 */
static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{

        dwc_hc_t *hc;
        dwc_otg_host_dma_desc_t *dma_desc;
        dwc_otg_qtd_t *qtd;
        int num_packets, len, n_desc = 0;

        hc = qh->channel;

        /* 
         * Start with hc->xfer_buff initialized in 
         * assign_and_init_hc(), then if SG transfer consists of multiple URBs,
         * this pointer re-assigned to the buffer of the currently processed QTD.
         * For non-SG request there is always one QTD active.
         */

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

                if (n_desc) {
                        /* SG request - more than 1 QTDs */
                        /* Re-point the channel at this QTD's remaining data. */
                        hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
                        hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
                }

                qtd->n_desc = 0;

                /* Emit one descriptor per chunk of this QTD's transfer. */
                do {
                        dma_desc = &qh->desc_list[n_desc];
                        len = hc->xfer_len;

                        /* Cap the chunk so it stays a whole number of
                         * max-packets below the descriptor size limit. */
                        if (len > MAX_DMA_DESC_SIZE)
                                len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

                        if (hc->ep_is_in) {
                                if (len > 0) {
                                        num_packets = (len + hc->max_packet - 1) / hc->max_packet;
                                } else {
                                        /* Need 1 packet for transfer length of 0. */
                                        num_packets = 1;
                                }
                                /* Always program an integral # of max packets for IN transfers. */
                                len = num_packets * hc->max_packet;
                        }

                        dma_desc->status.b.n_bytes = len;

                        /* Remember the programmed size for completion handling. */
                        qh->n_bytes[n_desc] = len;

                        if ((qh->ep_type == UE_CONTROL)
                            && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
                                dma_desc->status.b.sup = 1;     /* Setup Packet */

                        dma_desc->status.b.a = 1;       /* Active descriptor */
                        dma_desc->status.b.sts = 0;

                        dma_desc->buf =
                            ((unsigned long)hc->xfer_buff & 0xffffffff);

                        /* 
                         * Last descriptor(or single) of IN transfer 
                         * with actual size less than MaxPacket.
                         */
                        if (len > hc->xfer_len) {
                                hc->xfer_len = 0;
                        } else {
                                hc->xfer_buff += len;
                                hc->xfer_len -= len;
                        }

                        qtd->n_desc++;
                        n_desc++;
                }
                while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));
                

                qtd->in_process = 1;

                /* Control transfers are programmed one QTD (phase) at a time. */
                if (qh->ep_type == UE_CONTROL)
                        break;

                /* Stop when the descriptor list is exhausted. */
                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                /* Request Transfer Complete interrupt for the last descriptor */
                qh->desc_list[n_desc - 1].status.b.ioc = 1;
                /* End of List indicator */
                qh->desc_list[n_desc - 1].status.b.eol = 1;

                hc->ntd = n_desc;
        }
}
00684 
00702 void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
00703 {
00704         /* Channel is already assigned */
00705         dwc_hc_t *hc = qh->channel;
00706         uint8_t skip_frames = 0;
00707 
00708         switch (hc->ep_type) {
00709         case DWC_OTG_EP_TYPE_CONTROL:
00710         case DWC_OTG_EP_TYPE_BULK:
00711                 init_non_isoc_dma_desc(hcd, qh);
00712 
00713                 dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
00714                 break;
00715         case DWC_OTG_EP_TYPE_INTR:
00716                 init_non_isoc_dma_desc(hcd, qh);
00717 
00718                 update_frame_list(hcd, qh, 1);
00719 
00720                 dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
00721                 break;
00722         case DWC_OTG_EP_TYPE_ISOC:
00723 
00724                 if (!qh->ntd)
00725                         skip_frames = recalc_initial_desc_idx(hcd, qh);
00726 
00727                 init_isoc_dma_desc(hcd, qh, skip_frames);
00728 
00729                 if (!hc->xfer_started) {
00730 
00731                         update_frame_list(hcd, qh, 1);
00732 
00733                         /* 
00734                          * Always set to max, instead of actual size.
00735                          * Otherwise ntd will be changed with 
00736                          * channel being enabled. Not recommended.
00737                          *
00738                          */
00739                         hc->ntd = max_desc_num(qh);
00740                         /* Enable channel only once for ISOC */
00741                         dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
00742                 }
00743 
00744                 break;
00745         default:
00746 
00747                 break;
00748         }
00749 }
00750 
00751 static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
00752                                     dwc_hc_t * hc,
00753                                     dwc_otg_hc_regs_t * hc_regs,
00754                                     dwc_otg_halt_status_e halt_status)
00755 {
00756         struct dwc_otg_hcd_iso_packet_desc *frame_desc;
00757         dwc_otg_qtd_t *qtd, *qtd_tmp;
00758         dwc_otg_qh_t *qh;
00759         dwc_otg_host_dma_desc_t *dma_desc;
00760         uint16_t idx, remain;
00761         uint8_t urb_compl;
00762 
00763         qh = hc->qh;
00764         idx = qh->td_first;
00765 
00766         if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
00767                 DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
00768                     qtd->in_process = 0;
00769                 return;
00770         } else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
00771                    (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
00772                 /* 
00773                  * Channel is halted in these error cases.
00774                  * Considered as serious issues.
00775                  * Complete all URBs marking all frames as failed, 
00776                  * irrespective whether some of the descriptors(frames) succeeded or no.
00777                  * Pass error code to completion routine as well, to
00778                  * update urb->status, some of class drivers might use it to stop
00779                  * queing transfer requests.
00780                  */
00781                 int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
00782                     ? (-DWC_E_IO)
00783                     : (-DWC_E_OVERFLOW);
00784                                                 
00785                 DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
00786                         for (idx = 0; idx < qtd->urb->packet_count; idx++) {
00787                                 frame_desc = &qtd->urb->iso_descs[idx];
00788                                 frame_desc->status = err;
00789                         }
00790                         hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
00791                         dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
00792                 }
00793                 return;
00794         }
00795 
00796         DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
00797 
00798                 if (!qtd->in_process)
00799                         break;
00800 
00801                 urb_compl = 0;
00802 
00803                 do {
00804 
00805                         dma_desc = &qh->desc_list[idx];
00806                         
00807                         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
00808                         remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
00809 
00810                         if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
00811                                 /* 
00812                                  * XactError or, unable to complete all the transactions 
00813                                  * in the scheduled micro-frame/frame, 
00814                                  * both indicated by DMA_DESC_STS_PKTERR.
00815                                  */
00816                                 qtd->urb->error_count++;
00817                                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
00818                                 frame_desc->status = -DWC_E_PROTOCOL;
00819                         } else {
00820                                 /* Success */
00821                                                                 
00822                                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
00823                                 frame_desc->status = 0;
00824                         }
00825 
00826                         if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
00827                                 /*
00828                                  * urb->status is not used for isoc transfers here.
00829                                  * The individual frame_desc status are used instead.
00830                                  */
00831 
00832                                 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
00833                                 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
00834 
00835                                 /* 
00836                                  * This check is necessary because urb_dequeue can be called 
00837                                  * from urb complete callback(sound driver example).
00838                                  * All pending URBs are dequeued there, so no need for
00839                                  * further processing.
00840                                  */
00841                                 if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {   
00842                                         return;
00843                                 }
00844 
00845                                 urb_compl = 1;
00846 
00847                         }
00848 
00849                         qh->ntd--;
00850 
00851                         /* Stop if IOC requested descriptor reached */
00852                         if (dma_desc->status.b_isoc.ioc) {
00853                                 idx = desclist_idx_inc(idx, qh->interval, hc->speed);   
00854                                 goto stop_scan;
00855                         }
00856 
00857                         idx = desclist_idx_inc(idx, qh->interval, hc->speed);
00858 
00859                         if (urb_compl)
00860                                 break;
00861                 }
00862                 while (idx != qh->td_first);
00863         }
00864 stop_scan:
00865         qh->td_first = idx;
00866 }
00867 
00868 uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
00869                                        dwc_hc_t * hc,
00870                                        dwc_otg_qtd_t * qtd,
00871                                        dwc_otg_host_dma_desc_t * dma_desc,
00872                                        dwc_otg_halt_status_e halt_status,
00873                                        uint32_t n_bytes, uint8_t * xfer_done)
00874 {
00875 
00876         uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
00877         dwc_otg_hcd_urb_t *urb = qtd->urb;
00878 
00879         if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
00880                 urb->status = -DWC_E_IO;
00881                 return 1;
00882         }
00883         if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
00884                 switch (halt_status) {
00885                 case DWC_OTG_HC_XFER_STALL:
00886                         urb->status = -DWC_E_PIPE;
00887                         break;
00888                 case DWC_OTG_HC_XFER_BABBLE_ERR:
00889                         urb->status = -DWC_E_OVERFLOW;
00890                         break;
00891                 case DWC_OTG_HC_XFER_XACT_ERR:
00892                         urb->status = -DWC_E_PROTOCOL;
00893                         break;
00894                 default:        
00895                         DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
00896                                   halt_status);
00897                         break;
00898                 }
00899                 return 1;
00900         }
00901 
00902         if (dma_desc->status.b.a == 1) {
00903                 DWC_DEBUGPL(DBG_HCDV,
00904                             "Active descriptor encountered on channel %d\n",
00905                             hc->hc_num);
00906                 return 0;
00907         }
00908 
00909         if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
00910                 if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
00911                         urb->actual_length += n_bytes - remain;
00912                         if (remain || urb->actual_length == urb->length) {
00913                                 /* 
00914                                  * For Control Data stage do not set urb->status=0 to prevent
00915                                  * URB callback. Set it when Status phase done. See below.
00916                                  */
00917                                 *xfer_done = 1;
00918                         }
00919 
00920                 } else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
00921                         urb->status = 0;
00922                         *xfer_done = 1;
00923                 }
00924                 /* No handling for SETUP stage */
00925         } else {
00926                 /* BULK and INTR */
00927                 urb->actual_length += n_bytes - remain;
00928                 if (remain || urb->actual_length == urb->length) {
00929                         urb->status = 0;
00930                         *xfer_done = 1;
00931                 }
00932         }
00933 
00934         return 0;
00935 }
00936 
/**
 * Completes the URB(s) of a halted non-isochronous (Control/Bulk/Interrupt)
 * channel operating in Descriptor DMA mode.
 *
 * Walks every QTD queued on the channel's QH and, for each, every descriptor
 * that QTD occupies in the QH's descriptor list, delegating per-descriptor
 * status decoding to update_non_isoc_urb_state_ddma(). Finished or failed
 * URBs are handed back via hcd->fops->complete() and their QTDs freed. For
 * Control endpoints the SETUP -> DATA -> STATUS phase progression is advanced
 * here. Afterwards the QH's data toggle and PING state are updated.
 *
 * @param hcd         Pointer to the HCD state structure.
 * @param hc          Host channel that halted.
 * @param hc_regs     Register block of the halted host channel.
 * @param halt_status Reason the channel halted.
 */
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                        dwc_hc_t * hc,
                                        dwc_otg_hc_regs_t * hc_regs,
                                        dwc_otg_halt_status_e halt_status)
{
	dwc_otg_hcd_urb_t *urb = NULL;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint32_t n_bytes, n_desc, i;
	uint8_t failed = 0, xfer_done;

	n_desc = 0;

	qh = hc->qh;

	/*
	 * URB dequeue: nothing to complete here; just clear the in-process
	 * markers so the QTDs can be rescheduled or torn down elsewhere.
	 */
	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			qtd->in_process = 0;
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

		urb = qtd->urb;

		n_bytes = 0;
		xfer_done = 0;

		/* Scan the descriptors occupied by this QTD, in list order. */
		for (i = 0; i < qtd->n_desc; i++) {
			dma_desc = &qh->desc_list[n_desc];

			/* Byte count originally scheduled into this descriptor. */
			n_bytes = qh->n_bytes[n_desc];

			failed =
			    update_non_isoc_urb_state_ddma(hcd, hc, qtd,
							   dma_desc,
							   halt_status, n_bytes,
							   &xfer_done);

			if (failed
			    || (xfer_done
				&& (urb->status != -DWC_E_IN_PROGRESS))) {

				/* URB done (success or error): give it back and
				 * free its QTD. */
				hcd->fops->complete(hcd, urb->priv, urb,
						    urb->status);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				/* On a hard failure, abandon the rest of the
				 * descriptor list for this channel. */
				if (failed)
					goto stop_scan;
			} else if (qh->ep_type == UE_CONTROL) {
				/* Descriptor done but URB not finished:
				 * advance the Control transfer state machine. */
				if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
					if (urb->length > 0) {
						qtd->control_phase = DWC_OTG_CONTROL_DATA;
					} else {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
					}
					DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
				} else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
					if (xfer_done) {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
						DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
					} else if (i + 1 == qtd->n_desc) {
						/* 
						 * Last descriptor for Control data stage which is
						 * not completed yet.
						 */
						dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
					}
				}
			}

			n_desc++;
		}

	}

stop_scan:

	/*
	 * NOTE(review): on the "failed" path, qtd was freed before the goto,
	 * and on normal loop exit qtd is the FOREACH terminator — either way
	 * the pointer passed to dwc_otg_hcd_save_data_toggle() below is not a
	 * valid QTD. This appears to rely on that helper not dereferencing
	 * qtd for non-Control endpoints; verify against its implementation.
	 */
	if (qh->ep_type != UE_CONTROL) {
		/* 
		 * Resetting the data toggle for bulk
		 * and interrupt endpoints in case of stall. See handle_hc_stall_intr() 
		 */
		if (halt_status == DWC_OTG_HC_XFER_STALL)
			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
		else
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	}

	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
		hcint_data_t hcint;
		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		if (hcint.b.nyet) {
			/*
			 * Got a NYET on the last transaction of the transfer. It
			 * means that the endpoint should be in the PING state at the
			 * beginning of the next transfer.
			 */
			qh->ping_state = 1;
			clear_hc_int(hc_regs, nyet);
		}

	}

}
01044 
01062 void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
01063                                     dwc_hc_t * hc,
01064                                     dwc_otg_hc_regs_t * hc_regs,
01065                                     dwc_otg_halt_status_e halt_status)
01066 {
01067         uint8_t continue_isoc_xfer = 0;
01068         dwc_otg_transaction_type_e tr_type;
01069         dwc_otg_qh_t *qh = hc->qh;
01070 
01071         if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
01072 
01073                 complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
01074 
01075                 /* Release the channel if halted or session completed */
01076                 if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
01077                     DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
01078 
01079                         /* Halt the channel if session completed */
01080                         if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
01081                                 dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
01082                         }
01083 
01084                         release_channel_ddma(hcd, qh);
01085                         dwc_otg_hcd_qh_remove(hcd, qh);
01086                 } else {
01087                         /* Keep in assigned schedule to continue transfer */
01088                         DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
01089                                            &qh->qh_list_entry);
01090                         continue_isoc_xfer = 1;
01091 
01092                 }
01096         } else {
01097                 /* Scan descriptor list to complete the URB(s), then release the channel */
01098                 complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
01099 
01100                 release_channel_ddma(hcd, qh);
01101                 dwc_otg_hcd_qh_remove(hcd, qh);
01102 
01103                 if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
01104                         /* Add back to inactive non-periodic schedule on normal completion */
01105                         dwc_otg_hcd_qh_add(hcd, qh);
01106                 }
01107 
01108         }
01109         tr_type = dwc_otg_hcd_select_transactions(hcd);
01110         if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
01111                 if (continue_isoc_xfer) {
01112                         if (tr_type == DWC_OTG_TRANSACTION_NONE) {
01113                                 tr_type = DWC_OTG_TRANSACTION_PERIODIC;
01114                         } else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
01115                                 tr_type = DWC_OTG_TRANSACTION_ALL;
01116                         }
01117                 }
01118                 dwc_otg_hcd_queue_transactions(hcd, tr_type);
01119         }
01120 }
01121 
01122 #endif /* DWC_DEVICE_ONLY */

Generated on Thu Oct 27 03:56:37 2011 for DesignWare USB 2.0 OTG Controller (DWC_otg) Device Driver by  doxygen 1.3.9.1